How to use the gym.error.Error class in gym

To help you get started, we’ve selected a few gym.error.Error examples based on popular ways it is used in public projects.

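gym.error.Error is the base class for every exception gym raises, so you can raise it directly for environment-specific failures and catch it to handle any gym error in one place. A minimal sketch (make_env_or_exit is an illustrative helper, not part of gym):

import gym
import gym.error

def make_env_or_exit(env_id):
    try:
        return gym.make(env_id)
    except gym.error.Error as exc:
        # UnregisteredEnv, DeprecatedEnv, etc. all subclass gym.error.Error,
        # so this one handler covers any failure inside gym.make()
        raise SystemExit('Could not create {}: {}'.format(env_id, exc))

env = make_env_or_exit('CartPole-v0')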

github ppaquette / gym-super-mario / ppaquette_gym_super_mario / nes_env.py
def _launch_fceux(self):
        # Making sure ROM file is valid
        if '' == self.rom_path or not os.path.isfile(self.rom_path):
            raise gym.error.Error('Unable to find ROM. Please download the game from the web and configure the rom path by ' +
                                  'calling env.configure(rom_path=path_to_file)')

        # Creating pipes
        self._create_pipes()

        # Creating temporary lua file
        self.temp_lua_path = os.path.join('/tmp', str(seeding.hash_seed(None) % 2 ** 32) + '.lua')
        temp_lua_file = open(self.temp_lua_path, 'w', 1)
        for k, v in list(self.launch_vars.items()):
            temp_lua_file.write('%s = "%s";\n' % (k, v))
        for i, script in enumerate(self.lua_path):
            temp_lua_file.write('f_%d = assert (loadfile ("%s"));\n' % (i, script))
            temp_lua_file.write('f_%d ();\n' % i)
        temp_lua_file.close()
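Because the missing-ROM check raises gym.error.Error, callers can trap the configuration problem when the environment starts. A hedged sketch (the env id and the exact point where FCEUX launches are taken from the project's README-era conventions and may differ by version):

import gym
import gym.error
import ppaquette_gym_super_mario  # registers the ppaquette/* env ids

try:
    env = gym.make('ppaquette/SuperMarioBros-1-1-v0')
    env.reset()  # the emulator launch, and hence the ROM check, happens here
except gym.error.Error as exc:
    print('Set rom_path first: {}'.format(exc))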
github Unity-Technologies / marathon-envs / marathon-envs / marathon_envs / envs / __init__.py
import logging
import itertools
import numpy as np
from typing import Any, Dict, List, Optional, Tuple, Union

import gym
from gym import error, spaces

from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.base_env import BatchedStepResult
import os

class MarathonEnvsException(error.Error):
    """
    Any error related to the gym wrapper of ml-agents.
    """

    pass


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("marathon_envs")


GymSingleStepResult = Tuple[np.ndarray, float, bool, Dict]
GymMultiStepResult = Tuple[List[np.ndarray], List[float], List[bool], Dict]
GymStepResult = Union[GymSingleStepResult, GymMultiStepResult]
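Because MarathonEnvsException subclasses error.Error, a caller can catch either the specific wrapper exception or any gym error with a single handler. A small sketch reusing the class above (require_env_name is just an illustrative helper):

from gym import error

def require_env_name(name):
    if not name:
        raise MarathonEnvsException('An environment name is required.')
    return name

try:
    require_env_name('')
except error.Error as exc:
    # MarathonEnvsException lands here too, since it subclasses error.Error
    logger.error(exc)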
github spring01 / drlbox / wrapper / ppss.py
def _step(self, action):
        done = False
        total_reward = 0
        current_step = 0
        while current_step < self.act_steps and not done:
            self.stepcount += 1
            obs, reward, done, info = self.env.step(action)
            self.state.pop(0)
            self.state.append(self.preprocess(obs))
            total_reward += reward
            current_step += 1
        if 'ppss.stepcount' in info:
            raise gym.error.Error('Key "ppss.stepcount" already in info. '
                                  'Make sure you are not stacking '
                                  'the PPSSWrapper wrappers.')
        info['ppss.stepcount'] = self.stepcount
        self.stack_state = np.stack(self.state, axis=2)
        return self.stack_state.astype(np.float32), total_reward, done, info
github YuhangSong / DEHRL / overcooked.py
else:
            self.leg_indent = 0
        '''leg move distance'''
        self.leg_move_dis = self.screen_width/40
        self.body_move_dis = (int(self.screen_width/2)-int(self.min_x)-self.body_size/2-self.leg_size+self.leg_indent)/self.body_steps

        assert self.args.obs_type in ('ram', 'image')
        if self.args.obs_type == 'ram':
            self.observation_space = spaces.Box(low=0, high=1.0, dtype=np.float64, shape=(26,))
        elif self.args.obs_type == 'image':
            self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_height, self.screen_width, 1), dtype=np.uint8)
        else:
            raise error.Error('Unrecognized observation type: {}'.format(self.args.obs_type))

        if self.args.reward_level in [0]:
            self.episode_length_limit = 5
        elif self.args.reward_level in [1]:
            self.episode_length_limit = 4*6*2
        elif self.args.reward_level in [2]:
            if self.goal_num in [4]:
                # get 4 food in sequence
                self.episode_length_limit = 6+12+6+12
            elif self.goal_num in [3]:
                # get 3 food in sequence
                self.episode_length_limit = 6+12+6
            elif self.goal_num in [2]:
                # get 2 food in sequence
                self.episode_length_limit = 6+12
            elif self.goal_num in [1]:
                # get 1 food in sequence
                self.episode_length_limit = 6
github namidairo777 / Distributed-MADDPG / multiagent-envs / multiagent / rendering.py
def get_display(spec):
    """Convert a display specification (such as :0) into an actual Display
    object.

    Pyglet only supports multiple Displays on Linux.
    """
    if spec is None:
        return None
    elif isinstance(spec, six.string_types):
        return pyglet.canvas.Display(spec)
    else:
        raise error.Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))
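The three branches map directly onto usage (values here follow the code above):

get_display(None)   # -> None, let pyglet choose the default display
get_display(':0')   # -> pyglet.canvas.Display bound to X display :0
get_display(42)     # raises error.Error: invalid display specification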
github ppaquette / gym-pull / gym_pull / scoreboard / api.py
def upload_training_data(training_dir, api_key=None):
    # Could have multiple manifests
    results = monitoring.load_results(training_dir)
    if not results:
        raise error.Error('''Could not find any manifest files in {}.

(HINT: this usually means you did not yet close() your env.monitor and have not yet exited the process. You should call 'env.monitor.start(training_dir)' at the start of training and 'env.monitor.close()' at the end, or exit the process.)'''.format(training_dir))

    manifests = results['manifests']
    env_info = results['env_info']
    timestamps = results['timestamps']
    episode_lengths = results['episode_lengths']
    episode_rewards = results['episode_rewards']
    main_seeds = results['main_seeds']
    seeds = results['seeds']
    videos = results['videos']

# >>>>>>>>> START changes >>>>>>>>>>>>>>>>>>>>>>>>
    if '/' in env_info['env_id']:
        logger.warn('Scoreboard support for user environments is limited. Your submission will only appear for a limited number of environments.')
# <<<<<<<<< END changes <<<<<<<<<<<<<<<<<<<<<<<<<<
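The HINT text in the error spells out the expected call sequence; as a sketch against the old-gym monitor API the message describes:

import gym
from gym_pull.scoreboard.api import upload_training_data

env = gym.make('CartPole-v0')
env.monitor.start('/tmp/cartpole-run')   # writes the manifest files
for _ in range(10):
    done = False
    obs = env.reset()
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
env.monitor.close()                      # finalizes manifests so upload can find them

upload_training_data('/tmp/cartpole-run', api_key='YOUR_API_KEY')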
github openai / gym / gym / utils / seeding.py
def create_seed(a=None, max_bytes=8):
    """Create a strong random seed.

    Args:
        a (Optional[int, str]): None seeds from an operating system specific randomness source.
        max_bytes: Maximum number of bytes to use in the seed.
    """
    # Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
    if a is None:
        a = _bigint_from_bytes(os.urandom(max_bytes))
    elif isinstance(a, str):
        a = a.encode('utf8')
        a += hashlib.sha512(a).digest()
        a = _bigint_from_bytes(a[:max_bytes])
    elif isinstance(a, integer_types):
        a = a % 2**(8 * max_bytes)
    else:
        raise error.Error('Invalid type for seed: {} ({})'.format(type(a), a))

    return a
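A few calls showing each branch (signature as reconstructed above):

create_seed()                        # strong seed from os.urandom
create_seed('experiment-1')          # deterministic seed hashed from a string
create_seed(123456789, max_bytes=2)  # integer reduced mod 2**16
create_seed(1.5)                     # raises error.Error: invalid seed type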
github wenkesj / holdem / holdem / env.py
  def step(self, actions):
    """
    FOLD = 3

    RAISE_AMT = [0, minraise]
    """
    if len(actions) != len(self._seats):
      raise error.Error('actions must be same shape as number of seats.')

    if self._current_player is None:
      raise error.Error('Round cannot be played without 2 or more players.')

    if self._round == 4:
      raise error.Error('Rounds already finished, needs to be reset.')

    players = [p for p in self._seats if p.playing_hand]
    if len(players) == 1:
      raise error.Error('Round cannot be played with one player.')

    self._last_player = self._current_player
    self._last_actions = actions

    if not self._current_player.playedthisround and len([p for p in players if not p.isallin]) >= 1:
      if self._current_player.isallin:
        self._current_player = self._next(players, self._current_player)
        return self._get_current_step_returns(False)

      move = self._current_player.player_move(
          self._output_state(self._current_player), actions[self._current_player.player_id])

      if move[0] == 'call':
        self._player_bet(self._current_player, self._tocall)
        if self._debug:
          print('Player', self._current_player.player_id, move)
github grananqvist / reinforcement-learning-super-mario-A3C / random_walk.py
def __init__(self, env):
            super(SetPlayingModeWrapper, self).__init__(env)
            if target_mode not in ['algo', 'human']:
                raise gym.error.Error('Error - The mode "{}" is not supported. Supported options are "algo" or "human"'.format(target_mode))
            self.unwrapped.mode = target_mode
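In the source this __init__ sits inside a factory function, which is where target_mode comes from: it is a closure variable, not an attribute. A reconstructed sketch of that enclosing shape (not verbatim):

import gym

def SetPlayingMode(target_mode):
    class SetPlayingModeWrapper(gym.Wrapper):
        def __init__(self, env):
            super(SetPlayingModeWrapper, self).__init__(env)
            if target_mode not in ['algo', 'human']:
                raise gym.error.Error('Error - The mode "{}" is not supported. Supported options are "algo" or "human"'.format(target_mode))
            self.unwrapped.mode = target_mode

    return SetPlayingModeWrapper

# usage: env = SetPlayingMode('human')(env)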
github openai / gym / gym / benchmarks / registration.py
def __init__(self, env_id, trials, max_timesteps, max_seconds, reward_floor, reward_ceiling):
        self.env_id = env_id
        self.trials = trials
        self.max_timesteps = max_timesteps
        self.max_seconds = max_seconds
        self.reward_floor = reward_floor
        self.reward_ceiling = reward_ceiling

        if max_timesteps is None and max_seconds is None:
            raise error.Error('Must provide at least one of max_timesteps and max_seconds for {}'.format(self))
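The constructor validates its configuration eagerly, so a benchmark task missing both limits fails at definition time rather than mid-run. Assuming the enclosing class is gym's benchmark Task (the class name is not shown in the snippet):

from gym import error

try:
    task = Task(env_id='CartPole-v0', trials=3,
                max_timesteps=None, max_seconds=None,
                reward_floor=0.0, reward_ceiling=100.0)
except error.Error as exc:
    print(exc)  # Must provide at least one of max_timesteps and max_seconds ...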