How to use the gym.envs.registration.register function in gym

To help you get started, we’ve selected a few gym examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github smearle / gym-city / gym_city / __init__.py View on Github external
from gym.envs.registration import register

# Gym registrations for the Micropolis environment family.
# The map-based variants are fixed to a 14x14 board via kwargs.
register(
    id='MicropolisEnv-v0',
    entry_point='gym_city.envs:MicropolisEnv',
    kwargs=dict(MAP_X=14, MAP_Y=14),
)
register(
    id='MicropolisPaintEnv-v0',
    entry_point='gym_city.envs:MicropolisPaintEnv',
    kwargs=dict(MAP_X=14, MAP_Y=14),
)

# The walk/arcade variants take no extra constructor arguments.
register(
    id='MicropolisWalkEnv-v0',
    entry_point='gym_city.envs:MicroWalkEnv',
)
register(
    id='MicropolisArcadeEnv-v0',
    entry_point='gym_city.envs:MicroArcadeEnv',
)
github microsoft / TextWorld / textworld / gym / utils.py View on Github external
Environment ID that will compose a batch.
        batch_size:
            Number of independent environments to run.
        parallel:
            If True, the environment will be executed in different processes.

    Returns:
        The corresponding gym-compatible env_id to use.
    """
    batch_env_id = "batch{}-".format(batch_size) + env_id
    env_spec = spec(env_id)
    entry_point = 'textworld.gym.envs:BatchEnv'
    if parallel and batch_size > 1:
        entry_point = 'textworld.gym.envs:ParallelBatchEnv'

    register(
        id=batch_env_id,
        entry_point=entry_point,
        max_episode_steps=env_spec.max_episode_steps,
        nondeterministic=env_spec.nondeterministic,
        reward_threshold=env_spec.reward_threshold,
        # Setting the 'vnc' tag avoid wrapping the env with a TimeLimit wrapper. See
        # https://github.com/openai/gym/blob/4c460ba6c8959dd8e0a03b13a1ca817da6d4074f/gym/envs/registration.py#L122
        tags={"vnc": "foo"},
        kwargs={'env_id': env_id, 'batch_size': batch_size}
    )

    return batch_env_id
github tomsilver / pddlgym / pddlgym / __init__.py View on Github external
def register_pddl_env(name, is_test_env, other_args):
    """Register a PDDL domain as a Gym environment.

    Looks up ``<name>.pddl`` and the matching problem directory under the
    package's ``pddl/`` folder, then registers the env as
    ``PDDLEnv<Name>-v0`` (or ``PDDLEnv<Name>Test-v0`` for test envs).

    Args:
        name: Base name of the PDDL domain; matched case-insensitively
            against the ``.pddl`` file and problem directory on disk.
        is_test_env: If True, use the ``<name>_test`` problem directory and
            append ``Test`` to the registered env id.
        other_args: Mapping of extra keyword arguments forwarded to
            ``pddlgym.core.PDDLEnv``.
    """
    dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pddl")
    domain_file = os.path.join(dir_path, "{}.pddl".format(name.lower()))
    gym_name = name.capitalize()
    problem_dirname = name.lower()
    if is_test_env:
        gym_name += 'Test'
        problem_dirname += '_test'
    problem_dir = os.path.join(dir_path, problem_dirname)

    register(
        id='PDDLEnv{}-v0'.format(gym_name),
        # Fix: dropped the redundant dict(...) wrapper around the dict
        # literal; the merged contents are unchanged.
        entry_point='pddlgym.core:PDDLEnv',
        kwargs={'domain_file': domain_file, 'problem_dir': problem_dir,
                **other_args},
    )
github crowdAI / marLo / marlo / envs / CliffWalking / __init__.py View on Github external
def _register():
    ##########################################
    # Version 0 of env 
    ##########################################
    gym.envs.registration.register(
        id='MarLo-CliffWalking-v0',
        entry_point=MarloEnvBuilder,
        kwargs={
            "extra_params": {
            }
github rlworkgroup / metaworld / multiworld / envs / pygame / __init__.py View on Github external
def register_custom_envs():
    global _REGISTERED
    if _REGISTERED:
        return
    _REGISTERED = True

    LOGGER.info("Registering multiworld pygame gym environments")
    register(
        id='Point2DLargeEnv-offscreen-v0',
        entry_point='multiworld.envs.pygame.point2d:Point2DEnv',
        tags={
            'git-commit-hash': '166f0f3',
            'author': 'Vitchyr'
        },
        kwargs={
            'images_are_rgb': True,
            'target_radius': 1,
            'ball_radius': 1,
            'render_onscreen': False,
        },
    )
    register(
        id='Point2DLargeEnv-onscreen-v0',
        entry_point='multiworld.envs.pygame.point2d:Point2DEnv',
github hunkim / ReinforcementZeroToAll / 01_1_play_frozenlake_det.py View on Github external
# MACROS — FrozenLake action codes (gym toy_text convention).
LEFT, DOWN, RIGHT, UP = 0, 1, 2, 3

# Key mapping: ANSI escape sequences emitted by the arrow keys -> actions.
arrow_keys = {
    '\x1b[A': UP,     # up arrow
    '\x1b[B': DOWN,   # down arrow
    '\x1b[C': RIGHT,  # right arrow
    '\x1b[D': LEFT,   # left arrow
}

# Register FrozenLake with is_slippery False
# Register a deterministic FrozenLake variant: 4x4 map, slipping disabled.
register(
    id='FrozenLake-v3',
    kwargs=dict(map_name='4x4', is_slippery=False),
    entry_point='gym.envs.toy_text:FrozenLakeEnv',
)

# Instantiate the deterministic FrozenLake env registered above.
env = gym.make('FrozenLake-v3')
env.render()  # Show the initial board

# Interactive play loop: translate arrow-key presses into env actions.
while True:
    # Choose an action from keyboard
    # NOTE(review): inkey() is presumably defined elsewhere in this file
    # (a blocking single-keystroke reader) — confirm.
    key = inkey()
    if key not in arrow_keys.keys():
        # Any non-arrow key exits the loop.
        print("Game aborted!")
        break

    action = arrow_keys[key]
from gym.envs.registration import register

# Continuous-action CartPole, capped at 200 steps per episode.
register(
    id='CartPoleContinuous-v0',
    entry_point='env.cartpole_continuous:CartPoleContinuousEnv',
    reward_threshold=195.0,  # same "solved" threshold as classic CartPole
    max_episode_steps=200,
)
github awslabs / amazon-sagemaker-examples / reinforcement_learning / rl_hvac_coach_energyplus / src / eplus / __init__.py View on Github external
from gym.envs.registration import register

# EnergyPlus building-simulation environments.
for _env_id, _entry in (
    ('large-office-v0', 'eplus.envs:LargeOfficeEnv'),
    ('data-center-v0', 'eplus.envs:DataCenterEnv'),
):
    register(id=_env_id, entry_point=_entry)
github nalsil / kimhun_rl_windows / 01_play_frozenlake_det.py View on Github external
# This is the original version, but it does not work on Windows.
# ........

# FrozenLake action codes: left/down/right/up.
LEFT, DOWN, RIGHT, UP = range(4)

# ANSI arrow-key escape sequences mapped to their actions.
arrow_keys = {
    '\x1b[A': UP,
    '\x1b[B': DOWN,
    '\x1b[C': RIGHT,
    '\x1b[D': LEFT,
}

# Deterministic FrozenLake: 4x4 map with slipping turned off.
register(
    id='FrozenLake-v3',
    entry_point='gym.envs.toy_text:FrozenLakeEnv',
    kwargs=dict(map_name='4x4', is_slippery=False),
)

# Instantiate the deterministic FrozenLake env registered above and
# render the initial board.
env = gym.make('FrozenLake-v3')
env.render()

# Interactive play loop: read one keystroke at a time and step the env.
while True:
    key = readchar.readkey()  # blocking single-keystroke read
    if key not in arrow_keys.keys():
        # Any non-arrow key aborts the session.
        print("Game aborted!")
        break

    action = arrow_keys[key]
    # Classic gym step API: (observation, reward, done, info).
    state, reward, done, info = env.step(action)
github mtmoncur / deepracer_env / gym_deepracer / __init__.py View on Github external
from gym.envs.registration import register

# DeepRacer simulator environments: default and discrete-action variants.
for _env_id, _entry in (
    ('deepracer-v0', 'gym_deepracer.envs:DeepRacerEnv'),
    ('deepracerDiscrete-v0', 'gym_deepracer.envs:DeepRacerEnvDiscrete'),
):
    register(id=_env_id, entry_point=_entry)