Summary: Set up a DQN experiment in an Atari env in benchmark.
Reviewed By: rodrigodesalvobraz
Differential Revision: D66204020
fbshipit-source-id: 639a51529fb70cacf9f9471ec85c566a58c1f86f
1 parent: 61eaf77
Commit: f435791
4 changed files with 278 additions and 2 deletions.
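The new file below adds standard Atari preprocessing wrappers copied from stable-baselines3. As a rough sketch of how these wrappers might be composed for the DQN benchmark experiment this commit sets up: the environment id, the frameskip=1 argument, the make_wrapped_atari_env name, and the wrapper ordering are illustrative assumptions (they follow the usual stable-baselines3 AtariWrapper convention) and are not taken from this diff.

import ale_py  # noqa: F401  # assumed installed; importing it registers the ALE/* environments
import gymnasium as gym

# NoopResetEnv, MaxAndSkipEnv, EpisodicLifeEnv, and FireResetEnv are assumed to be
# imported from the new file shown below.

def make_wrapped_atari_env(env_id: str = "ALE/Pong-v5") -> gym.Env:
    # frameskip=1 disables ALE's built-in frame skipping so that
    # MaxAndSkipEnv controls action repetition instead (assumed configuration).
    env = gym.make(env_id, frameskip=1)
    env = NoopResetEnv(env, noop_max=30)  # random number of no-ops on reset
    env = MaxAndSkipEnv(env, skip=4)  # repeat action, max-pool the last two frames
    env = EpisodicLifeEnv(env)  # treat loss of life as end of episode
    if "FIRE" in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)  # press FIRE after reset where the game requires it
    return env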
@@ -0,0 +1,185 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# pyre-ignore-all-errors

"""
The code from this file is copied from https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/atari_wrappers.py
"""

from typing import Any, Dict, SupportsFloat, Tuple

import gymnasium as gym
import numpy as np
from gymnasium import spaces


AtariResetReturn = Tuple[np.ndarray, Dict[str, Any]]
AtariStepReturn = Tuple[np.ndarray, SupportsFloat, bool, bool, Dict[str, Any]]

try:
    import cv2

    cv2.ocl.setUseOpenCL(False)
except ImportError:
    cv2 = None  # type: ignore[assignment]


class NoopResetEnv(gym.Wrapper[np.ndarray, int, np.ndarray, int]):
    """
    Sample initial states by taking a random number of no-ops on reset.
    No-op is assumed to be action 0.
    :param env: Environment to wrap
    :param noop_max: Maximum value of no-ops to run
    """

    def __init__(self, env: gym.Env, noop_max: int = 30) -> None:
        super().__init__(env)
        self.noop_max = noop_max
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == "NOOP"  # type: ignore[attr-defined]

    def reset(self, **kwargs) -> AtariResetReturn:
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.integers(1, self.noop_max + 1)
        assert noops > 0
        obs = np.zeros(0)
        info: Dict = {}
        for _ in range(noops):
            obs, _, terminated, truncated, info = self.env.step(self.noop_action)
            if terminated or truncated:
                obs, info = self.env.reset(**kwargs)
        return obs, info


class FireResetEnv(gym.Wrapper[np.ndarray, int, np.ndarray, int]):
    """
    Take action on reset for environments that are fixed until firing.
    :param env: Environment to wrap
    """

    def __init__(self, env: gym.Env) -> None:
        super().__init__(env)
        assert env.unwrapped.get_action_meanings()[1] == "FIRE"  # type: ignore[attr-defined]
        assert len(env.unwrapped.get_action_meanings()) >= 3  # type: ignore[attr-defined]

    def reset(self, **kwargs) -> AtariResetReturn:
        self.env.reset(**kwargs)
        obs, _, terminated, truncated, _ = self.env.step(1)
        if terminated or truncated:
            self.env.reset(**kwargs)
        obs, _, terminated, truncated, _ = self.env.step(2)
        if terminated or truncated:
            self.env.reset(**kwargs)
        return obs, {}


class EpisodicLifeEnv(gym.Wrapper[np.ndarray, int, np.ndarray, int]):
    """
    Make end-of-life == end-of-episode, but only reset on true game over.
    Done by DeepMind for the DQN and co. since it helps value estimation.
    :param env: Environment to wrap
    """

    def __init__(self, env: gym.Env) -> None:
        super().__init__(env)
        self.lives = 0
        self.was_real_done = True

    def step(self, action: int) -> AtariStepReturn:
        obs, reward, terminated, truncated, info = self.env.step(action)
        self.was_real_done = terminated or truncated
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()  # type: ignore[attr-defined]
        if 0 < lives < self.lives:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames,
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            terminated = True
        self.lives = lives
        return obs, reward, terminated, truncated, info

    def reset(self, **kwargs) -> AtariResetReturn:
        """
        Calls the Gym environment reset, only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        :param kwargs: Extra keywords passed to env.reset() call
        :return: the first observation of the environment
        """
        if self.was_real_done:
            obs, info = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, terminated, truncated, info = self.env.step(0)

            # The no-op step can lead to a game over, so we need to check it again
            # to see if we should reset the environment and avoid the
            # monitor.py `RuntimeError: Tried to step environment that needs reset`
            if terminated or truncated:
                obs, info = self.env.reset(**kwargs)
        self.lives = self.env.unwrapped.ale.lives()  # type: ignore[attr-defined]
        return obs, info


class MaxAndSkipEnv(gym.Wrapper[np.ndarray, int, np.ndarray, int]):
    """
    Return only every ``skip``-th frame (frameskipping)
    and return the max between the two last frames.
    :param env: Environment to wrap
    :param skip: Number of ``skip``-th frame
        The same action will be taken ``skip`` times.
    """

    def __init__(self, env: gym.Env, skip: int = 4) -> None:
        super().__init__(env)
        # most recent raw observations (for max pooling across time steps)
        assert (
            env.observation_space.dtype is not None
        ), "No dtype specified for the observation space"
        assert (
            env.observation_space.shape is not None
        ), "No shape defined for the observation space"
        self._obs_buffer = np.zeros(
            (2, *env.observation_space.shape), dtype=env.observation_space.dtype
        )
        self._skip = skip

    def step(self, action: int) -> AtariStepReturn:
        """
        Step the environment with the given action.
        Repeat action, sum reward, and max over last observations.
        :param action: the action
        :return: observation, reward, terminated, truncated, information
        """
        total_reward = 0.0
        terminated = truncated = False
        for i in range(self._skip):
            obs, reward, terminated, truncated, info = self.env.step(action)
            done = terminated or truncated
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            if i == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += float(reward)
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)

        return max_frame, total_reward, terminated, truncated, info
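As a quick, self-contained sanity check of MaxAndSkipEnv that does not require ALE, one could wrap a toy environment. _CountingEnv below is a hypothetical helper written only for this illustration and is not part of the commit.

import gymnasium as gym
import numpy as np
from gymnasium import spaces


class _CountingEnv(gym.Env):
    # Toy env: the observation is a 1-element array holding the step count,
    # every step yields reward 1.0 and the episode never terminates on its own.
    def __init__(self) -> None:
        self.observation_space = spaces.Box(0.0, 1000.0, shape=(1,), dtype=np.float32)
        self.action_space = spaces.Discrete(1)
        self._t = 0

    def reset(self, *, seed=None, options=None):
        super().reset(seed=seed)
        self._t = 0
        return np.array([0.0], dtype=np.float32), {}

    def step(self, action):
        self._t += 1
        return np.array([float(self._t)], dtype=np.float32), 1.0, False, False, {}


env = MaxAndSkipEnv(_CountingEnv(), skip=4)
obs, info = env.reset()
obs, reward, terminated, truncated, info = env.step(0)
assert reward == 4.0  # rewards from the 4 underlying steps are summed
assert obs[0] == 4.0  # max over the last two raw frames (steps 3 and 4)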