How to use the gym.Wrapper.__init__ function in gym

To help you get started, we’ve selected a few gym examples that show popular ways gym.Wrapper.__init__ is used in public projects.
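All of the examples below follow the same pattern: a subclass calls gym.Wrapper.__init__(self, env) first, so the base class stores the wrapped environment as self.env and forwards attributes such as action_space and observation_space, and only then sets up its own state. Here is a minimal sketch of that pattern (the class name, the verbose flag, and the chosen environment are illustrative, not taken from the projects below):

import gym

class VerboseWrapper(gym.Wrapper):
    def __init__(self, env, verbose=False):
        # Initialize the base Wrapper first; it stores env as self.env
        # and proxies spaces and other attributes to the wrapped env.
        gym.Wrapper.__init__(self, env)
        self.verbose = verbose  # wrapper-specific state comes after

# Usage: wrap any environment, then use it like a normal env.
env = VerboseWrapper(gym.make("CartPole-v1"), verbose=True)
obs = env.reset()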


github CDMCH / ddpg-curiosity-and-multi-criteria-her / ddpg_curiosity_mc_her / common / retro_wrappers.py
def __init__(self, env, max_random_steps, on_startup=True, every_episode=False):
    # Initialize the base wrapper before any wrapper-specific state.
    gym.Wrapper.__init__(self, env)
    self.on_startup = on_startup
    self.every_episode = every_episode
    self.random_steps = max_random_steps
    self.last_obs = None
    if on_startup:
        # some_random_steps() is defined elsewhere in this class; it
        # advances the env by up to max_random_steps random actions.
        self.some_random_steps()
github vwxyzjn / cleanrl / cleanrl / ppo_atari.py
def __init__(self, env, skip=4):
    """Return only every `skip`-th frame."""
    gym.Wrapper.__init__(self, env)
    # Most recent raw observations (for max pooling across time steps).
    self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
    self._skip = skip
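This __init__ comes from the widely copied MaxAndSkipEnv wrapper, and the two-slot buffer only makes sense together with a step method that max-pools the last two raw frames. A sketch of the usual companion step, following the common OpenAI Baselines pattern and the old 4-tuple step API (not copied verbatim from cleanrl):

def step(self, action):
    """Repeat action over skipped frames, sum reward, max-pool the last two frames."""
    total_reward = 0.0
    done = None
    for i in range(self._skip):
        obs, reward, done, info = self.env.step(action)
        # Only the last two raw frames are kept for max pooling.
        if i == self._skip - 2:
            self._obs_buffer[0] = obs
        if i == self._skip - 1:
            self._obs_buffer[1] = obs
        total_reward += reward
        if done:
            break
    # Taking the max over the last two frames removes Atari sprite flicker.
    max_frame = self._obs_buffer.max(axis=0)
    return max_frame, total_reward, done, info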
github p-christ / Deep-Reinforcement-Learning-Algorithms-with-PyTorch / agents / hierarchical_agents / HIRO.py
def __init__(self, env, HIRO_agent):
    Wrapper.__init__(self, env)  # assumes `from gym import Wrapper`
    self.env = env  # redundant: gym.Wrapper.__init__ already stores env as self.env
    self.HIRO_agent = HIRO_agent
    # In HIRO the higher-level "actions" are goals, i.e. points in observation space.
    self.action_space = self.observation_space
github thomasehuang / Reproducing-Curiosity-Driven-Exploration-By-Bootstrapping-Features / atari_wrappers.py
def __init__(self, env):
    """Take action on reset for environments that are fixed until firing."""
    gym.Wrapper.__init__(self, env)
    # Action 1 must be FIRE and the game must have at least three actions.
    assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
    assert len(env.unwrapped.get_action_meanings()) >= 3
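The asserts guarantee that action index 1 is FIRE, which the wrapper's reset presses so the game actually starts. A sketch of the companion reset, following the familiar Baselines FireResetEnv pattern and the old 4-tuple step API (not copied verbatim from this repo):

def reset(self, **kwargs):
    self.env.reset(**kwargs)
    obs, _, done, _ = self.env.step(1)  # press FIRE to start the game
    if done:
        self.env.reset(**kwargs)
    obs, _, done, _ = self.env.step(2)  # some games need a second action
    if done:
        self.env.reset(**kwargs)
    return obs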
github openai / atari-demo / wrappers.py
def __init__(self, env, skip=4):
    """Return only every `skip`-th frame."""
    gym.Wrapper.__init__(self, env)
    # Most recent raw observations (for max pooling across time steps);
    # the deque requires `from collections import deque` at module level.
    self._obs_buffer = deque(maxlen=2)
    self._skip = skip
github keiohta / tf2rl / tf2rl / envs / atari_wrapper.py
def __init__(self, env, noop_max=30):
    """
    Sample initial states by taking a random number of no-ops on reset.
    No-op is assumed to be action 0.
    """
    gym.Wrapper.__init__(self, env)
    self.noop_max = noop_max
    self.override_num_noops = None  # set externally to force an exact no-op count
    self.noop_action = 0
    assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
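noop_max, override_num_noops, and noop_action are consumed by the wrapper's reset, which randomizes the initial state by stepping with no-ops. A sketch of that reset, following the standard Baselines NoopResetEnv pattern (np_random.randint is the old gym RNG API; not copied verbatim from tf2rl):

def reset(self, **kwargs):
    """Reset, then take a random number of no-ops in [1, noop_max]."""
    self.env.reset(**kwargs)
    if self.override_num_noops is not None:
        noops = self.override_num_noops
    else:
        noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
    assert noops > 0
    obs = None
    for _ in range(noops):
        obs, _, done, _ = self.env.step(self.noop_action)
        if done:
            obs = self.env.reset(**kwargs)
    return obs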
github heronsystems / adeptRL / adept / env / _gym_wrappers.py
def __init__(self, env):
    """Take action on reset for envs that are fixed until firing."""
    gym.Wrapper.__init__(self, env)
    self.lives = 0
    # Cache the index of the FIRE action for reuse in step()/reset().
    self.fire_action = env.unwrapped.get_action_meanings().index('FIRE')
    assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
    assert len(env.unwrapped.get_action_meanings()) >= 3
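Unlike the plain fire-on-reset wrappers above, this variant caches the FIRE action index and tracks a lives counter, which suggests it also re-presses FIRE after a life is lost mid-episode. A hedged sketch of that idea (this step method is an assumption about the design, not adeptRL's actual code; ale.lives() is the standard Atari lives counter):

def step(self, action):
    obs, reward, done, info = self.env.step(action)
    lives = self.env.unwrapped.ale.lives()
    if 0 < lives < self.lives and not done:
        # Assumption: a life was just lost, so press FIRE to resume play.
        obs, _, done, info = self.env.step(self.fire_action)
    self.lives = lives
    return obs, reward, done, info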