with open(args.experiment_spec) as fp:
    experiment_spec = json.load(fp=fp)

run_mode = experiment_spec.get("run_mode", "distributed")
if run_mode == "distributed":
    ps_hosts = args.ps_hosts.split(",")
    worker_hosts = args.worker_hosts.split(",")
    cluster = {'ps': ps_hosts, 'worker': worker_hosts}
    cluster_spec = tf.train.ClusterSpec(cluster)
else:
    cluster_spec = None

if "environment" not in experiment_spec:
    raise TensorForceError("No 'environment' configuration found in experiment-spec.")
environment_spec = experiment_spec["environment"]

# Check for a remote env and log it (remote envs are put into a separate container).
is_remote = environment_spec.pop("remote", False)
env_kwargs = {}
if is_remote:
    img = environment_spec.pop("image", "default")
    env_kwargs.update({"host": args.remote_env_host})
    logger.info("Experiment is run with RemoteEnvironment {} (in separate container).".format(img))

if run_mode != "multi-threaded":
    environments = [Environment.from_spec(experiment_spec["environment"], env_kwargs)]
else:
    # For remote envs in multi-threaded mode, a sequence of ports has to be assigned, since all
    # envs run in the same pod. In single mode, the default port is used.
    environments = [Environment.from_spec(experiment_spec["environment"], env_kwargs)]
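# Hedged example: a minimal experiment spec covering only the fields read above ("run_mode",
# "environment", and the optional "remote"/"image" keys). The environment "type"/"gym_id"
# values are assumptions for illustration, not taken from the original source.
example_experiment_spec = {
    "run_mode": "distributed",
    "environment": {
        "type": "openai_gym",     # assumed environment type
        "gym_id": "CartPole-v0",  # assumed
        "remote": True,
        "image": "default"
    }
}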
def __setattr__(self, key, value):
    if key == '_config' or key == '_accessed':
        super(Configuration, self).__setattr__(key, value)
    elif key not in self._config:
        raise TensorForceError("Value '{}' is not defined.".format(key))
    else:
        raise TensorForceError("Setting config attributes not allowed.")
        # self._config[key] = value
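# Rough usage sketch of the guard above. The keyword-argument constructor
# (Configuration(learning_rate=1e-3)) is an assumption of this sketch: existing keys are
# read-only and unknown keys are rejected, so both assignments below raise TensorForceError.
config = Configuration(learning_rate=1e-3)
try:
    config.learning_rate = 1e-4  # existing key -> "Setting config attributes not allowed."
except TensorForceError as e:
    print(e)
try:
    config.batch_size = 32       # unknown key -> "Value 'batch_size' is not defined."
except TensorForceError as e:
    print(e)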
def tf_apply(self, x, update):
    if self.pooling_type == 'average':
        x = tf.nn.avg_pool(value=x, ksize=self.window, strides=self.stride, padding=self.padding)
    elif self.pooling_type == 'max':
        x = tf.nn.max_pool(value=x, ksize=self.window, strides=self.stride, padding=self.padding)
    else:
        raise TensorForceError('Invalid pooling type: {}'.format(self.pooling_type))
    return x
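# Hedged example: how such a pooling layer is typically configured in a network spec. The keys
# mirror the attributes used in tf_apply above; the layer type name "pool2d" is an assumption
# of this sketch.
pooling_layer_spec = {
    "type": "pool2d",
    "pooling_type": "max",
    "window": 2,
    "stride": 2,
    "padding": "SAME"
}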
# Redirect output to a file.
sys.stdout = open('lab_output.txt', 'w')

args = parser.parse_args()

environment = DeepMindLab(args.level_id)

path = os.path.dirname(__file__)

if args.agent_config:
    # Build an absolute path and load the agent configuration from JSON.
    with open(os.path.join(path, args.agent_config), 'r') as fp:
        agent_config = json.load(fp=fp)
else:
    raise TensorForceError("No agent configuration provided.")

if not args.network_spec:
    raise TensorForceError("No network configuration provided.")
else:
    with open(os.path.join(path, args.network_spec), 'r') as fp:
        network_spec = json.load(fp=fp)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)  # TODO: make the log level configurable

agent = Agent.from_spec(
    spec=agent_config,
    kwargs=dict(
        states=environment.states,
        actions=environment.actions,
        network=network_spec
    )
)

if args.load:
    load_dir = os.path.dirname(args.load)
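    # Sketch only (not in the original excerpt): restore previously saved weights from the
    # directory derived above. `agent.restore_model(directory=...)` is assumed to be the
    # matching restore call for this TensorForce version; adjust if your agent API differs.
    agent.restore_model(directory=load_dir)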
self.terminal_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(None,), name='terminal')

# Reward
self.reward_input = tf.placeholder(dtype=util.tf_dtype('float'), shape=(None,), name='reward')

# Reward preprocessing
if self.reward_preprocessing_spec is None:
    self.reward_preprocessing = None
else:
    self.reward_preprocessing = PreprocessorStack.from_spec(
        spec=self.reward_preprocessing_spec,
        # TODO: this can eventually have more complex shapes?
        kwargs=dict(shape=())
    )
    if self.reward_preprocessing.processed_shape(shape=()) != ():
        raise TensorForceError("Invalid reward preprocessing!")

# Deterministic/independent action flags (should probably be the same)
self.deterministic_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='deterministic')
self.independent_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='independent')

# TensorFlow functions
self.fn_initialize = tf.make_template(
    name_='initialize',
    func_=self.tf_initialize,
    custom_getter_=custom_getter
)
self.fn_preprocess = tf.make_template(
    name_='preprocess',
    func_=self.tf_preprocess,
    custom_getter_=custom_getter
)
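# Why tf.make_template is used above: it wraps a function so that every call shares the same
# variables under a common scope. A minimal, self-contained TF 1.x illustration (unrelated to
# the model code above):
def _scale(x):
    w = tf.get_variable('w', shape=(), initializer=tf.ones_initializer())
    return x * w

scale_fn = tf.make_template('scale', _scale)
y1 = scale_fn(tf.constant(1.0))
y2 = scale_fn(tf.constant(2.0))  # reuses the same variable 'w' created in the first call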
def actions(self):
    if isinstance(self.env.action_space, VNCActionSpace):
        return dict(
            key=dict(type='int', num_actions=len(self.env.action_space.keys)),
            button=dict(type='int', num_actions=len(self.env.action_space.buttonmasks)),
            position=dict(
                type='int',
                num_actions=self.env.action_space.screen_shape[0] * self.env.action_space.screen_shape[1]
            )
        )
    elif isinstance(self.env.action_space, Discrete):
        return dict(type='int', num_actions=self.env.action_space.n)
    elif len(self.env.action_space.shape) == 1:
        # shape[0] is an int, so iterate over it directly (len() would raise a TypeError).
        return {'action' + str(n): dict(type='float') for n in range(self.env.action_space.shape[0])}
    else:
        raise TensorForceError("Unsupported action space: {}".format(self.env.action_space))
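# Illustrative expected outputs of the property above (not part of the original source):
discrete_actions_example = dict(type='int', num_actions=4)  # e.g. for Discrete(4)
box_actions_example = {                                     # e.g. for a 1-D Box of size 2
    'action0': dict(type='float'),
    'action1': dict(type='float')
}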
def tf_apply(self, x, update):
    inputs_to_merge = list()
    for name in self.inputs:
        # The previous input, by name or "*", as in a normal network_spec.
        # (named_tensors is not used here, as that could have unintended outcomes.)
        if name == "*" or name == "previous":
            inputs_to_merge.append(x)
        elif name in self.named_tensors:
            inputs_to_merge.append(self.named_tensors[name])
        else:
            # Key not found among the available inputs: list them for the user and raise.
            keys = list(self.named_tensors)
            raise TensorForceError(
                'ComplexNetwork input "{}" doesn\'t exist. Available inputs: {}'.format(name, keys)
            )

    # Check the inputs for casting to the most precise type, so that TensorFlow does not raise
    # an error for mixed dtypes. Quick & dirty cast that only promotes types:
    # bool=0, int32=10, int64=20, float32=30, float64=40
    cast_type_level = 0
    cast_type_dict = {
        'bool': 0,
        'int32': 10,
        'int64': 20,
        'float32': 30,
        'float64': 40
    }
    cast_type_func_dict = {
        0: tf.identity,
        10: tf.to_int32,
        20: tf.to_int64,
        30: tf.to_float,
        40: tf.to_double
    }
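# Standalone sketch of the promotion idea above (TF 1.x): pick the highest type "level" among
# the inputs and cast everything up to it. The small tables mirror the ones defined in tf_apply.
_levels = {'bool': 0, 'int32': 10, 'int64': 20, 'float32': 30, 'float64': 40}
_casts = {0: tf.identity, 10: tf.to_int32, 20: tf.to_int64, 30: tf.to_float, 40: tf.to_double}
_example = [tf.constant([1, 2], dtype=tf.int32), tf.constant([0.5, 1.5], dtype=tf.float32)]
_max_level = max(_levels[t.dtype.base_dtype.name] for t in _example)
_promoted = [_casts[_max_level](t) for t in _example]  # both tensors end up as float32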
def connect(self, timeout=600):
    RemoteEnvironment.connect(self, timeout)

    # Get the action- and state-specs from our game.
    self.protocol.send({"cmd": "get_spec"}, self.socket)
    response = self.protocol.recv(self.socket, "utf-8")

    # Game's name (kept non-mandatory for now).
    self.game_name = response.get("game_name")

    # Observation space.
    if "observation_space_desc" not in response:
        raise TensorForceError("Response to `get_spec` does not contain field `observation_space_desc`!")
    self.observation_space_desc = response["observation_space_desc"]

    # Action mappings.
    if "action_space_desc" not in response:
        raise TensorForceError("Response to `get_spec` does not contain field `action_space_desc`!")
    self.action_space_desc = response["action_space_desc"]
    if self.discretize_actions:
        self.discretize_action_space_desc()

    # Invalidate our states- and actions-caches.
    if "states" in self.__dict__:
        del self.__dict__["states"]
    if "actions" in self.__dict__:
        del self.__dict__["actions"]
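# Illustrative shape of a `get_spec` response as consumed above. Only the three field names the
# code reads are grounded in the source; the value formats shown are assumptions.
example_get_spec_response = {
    "game_name": "my_game",  # optional
    "observation_space_desc": {"screen": {"type": "float", "shape": [84, 84, 3]}},  # assumed format
    "action_space_desc": {"move": {"type": "int", "num_actions": 4}}                # assumed format
}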
"""
Dense layer.
Args:
size: Layer size, if None than input size matches the output size of the layer
weights: Weight initialization, random if None.
bias: If true, bias is added.
activation: Type of nonlinearity, or dict with name & arguments
l2_regularization: L2 regularization weight.
l1_regularization: L1 regularization weight.
skip: Add skip connection like ResNet (https://arxiv.org/pdf/1512.03385.pdf),
doubles layers and ShortCut from Input to output
"""
self.skip = skip
if self.skip and size is not None:
raise TensorForceError(
'Dense Layer SKIP connection needs Size=None, uses input shape '
'sizes to create skip connection network, please delete "size" parameter'
)
self.linear = Linear(
size=size,
weights=weights,
bias=bias,
l2_regularization=l2_regularization,
l1_regularization=l1_regularization,
summary_labels=summary_labels,
trainable=trainable
)
if self.skip:
self.linear_skip = Linear(
size=size,
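# Hedged usage sketch: how a dense layer like this is typically declared inside a network spec.
# The key names mirror the constructor arguments documented above; the layer type name "dense"
# and the example values are assumptions of this sketch.
dense_layer_spec = {"type": "dense", "size": 64, "activation": "relu"}
skip_dense_layer_spec = {"type": "dense", "size": None, "skip": True}  # skip requires size=None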
def convert_dictionary_to_string(self, data, indent=0, format_type=0, separator=None, eol=None):
    data_string = ""
    add_separator = ""
    if eol is None:
        eol = os.linesep
    if separator is None:
        separator = ", "

    # This should never occur, but it is kept here as a catch-all.
    if type(data) is not dict:
        raise TensorForceError(
            "Error: MetaParameterRecorder dictionary conversion was passed an unsupported"
            " type {}.".format(str(type(data)))
        )

    # TensorBoard (markdown-style table)
    if format_type == 0:
        label = ""
        div = ""
        if indent > 0:
            label = " | "
            div = "--- | "
        data_string += label + "Key | Value" + eol + div + "--- | ----" + eol
        for key in data:
            key_txt = key
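# Illustrative only: the table header the format_type == 0 branch above emits for a top-level
# call (indent == 0, default eol). Row formatting continues past this excerpt and is therefore
# not reproduced here.
eol = os.linesep
expected_header = "Key | Value" + eol + "--- | ----" + eol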