Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _set_param(
    self,
    layer_id,
    param_id,
    param_type,
    param,
):
    """Write one parameter blob into the packed RNN weights.

    The numpy value is staged in a scratch tensor and handed, together
    with ``self.weights``, to an ``RNNParamSet`` expression; the
    ``(layer_id, param_id, param_type)`` triple is forwarded verbatim
    to address the target slot.

    Parameters
    ----------
    layer_id : number
        Forwarded to ``RNNParamSet`` (presumably the stacked-layer index
        — confirm against RNNParamSet).
    param_id : number
        Forwarded to ``RNNParamSet`` (presumably the per-layer param slot).
    param_type : object
        Forwarded to ``RNNParamSet`` (presumably 'matrix' vs 'bias' —
        confirm against RNNParamSet).
    param : numpy.ndarray
        The value to copy in; any other type raises ``ValueError``.
    """
    if isinstance(param, numpy.ndarray):
        # Stage the value in a fixed-name scratch tensor before handing
        # it to the op below.
        param_temp = _Tensor.Ref('/tmp/rnn_param')
        param_temp.set_value(param)
        param = param_temp
    else: raise ValueError('Excepted a numpy array.')
    self.weights.expressions = dict() # Clear cached expressions
    outputs = RNNParamSet(
        inputs=[self.weights, param],
        layer_id=layer_id,
        param_id=param_id,
        param_type=param_type,
        rnn_mode=self.mode,
        input_size=self.input_size,
        hidden_size=self.hidden_size,
        num_layers=self.num_layers,
        num_directions=self.num_directions,
    )
    # NOTE(review): the loop body is missing from this chunk — the
    # original presumably executes each returned expression; confirm
    # against the full file before relying on this fragment.
    for k, v in outputs.expressions.items():
        """
        # NOTE(review): fragment of a method body — the `def` line and the
        # docstring head paired with the quotes above are outside this chunk.
        # It parses a text-format NetParameter and builds layer wrappers.
        self._net = _proto_def.NetParameter()
        # Parse the text-format prototxt into the NetParameter message.
        # NOTE(review): the handle from open() is never closed — consider a
        # `with` block in the full file.
        _parse_text_proto(open(proto_txt,'r').read(), self._net)
        self._phase = phase
        self._layers = []
        self._inputs_to_tensors = {}
        # `_blobs` may already exist (e.g. set by a subclass/earlier call);
        # only create it when absent.
        if not hasattr(self, '_blobs'): self._blobs = {}
        self._losses, self._trainable_vars = [], []
        # Register each declared net input as a Variable tensor paired with
        # a '<name>_grad' reference for its gradient.
        if len(self._net.input) > 0:
            for input in self._net.input:
                if not input in self._blobs:
                    variable = _Tensor(input).Variable()
                    self._blobs[input] = {
                        'data': variable,
                        'diff': _Tensor.Ref(variable.name + '_grad'),
                    }
                self._inputs_to_tensors[input] = self._blobs[input]['data']
        # Instantiate a wrapper for each layer that passes FilterLayer,
        # looked up as '<type>Layer' in the layer factory module.
        for layer in self._net.layer:
            if not self.FilterLayer(layer): continue
            self._layers.append(getattr(
                _layer_factory, layer.type + 'Layer')(layer))
        self.Setup()
        # Second pass over the (filtered) layers for backward checking.
        for layer in self._net.layer:
            if not self.FilterLayer(layer): continue
            self.CheckBackward(layer)
def _try_get_tensor(name=None):
    """Try to create or get a tensor"""
    # An absent or empty name means a brand-new anonymous tensor;
    # anything else is treated as a reference to the named tensor.
    if name is None or name == '':
        return _Tensor()
    return _Tensor.Ref(name)
    >>> import dragon as dg
    >>> x = dg.Tensor('x').Variable()
    >>> y = x * 2
    >>> dx = grad(y, x)
    >>> z = dg.Tensor('z').Variable()
    >>> y = x + z
    >>> dx, dz = grad(y, [x, z])
    """
    # NOTE(review): the `def` line and the head of the docstring above are
    # outside this chunk; from the examples this is grad(cost, wrt, ...).
    grads = []
    # Accept a single tensor or a list of tensors uniformly.
    if not isinstance(wrt, list): wrt = [wrt]
    for w in wrt:
        # Record the cost <-> wrt relation on both tensors ...
        cost.gradient.add_wrt(w.name)
        w.gradient.add_cost(cost)
        # ... and hand back a reference to the conventional '<name>_grad'
        # tensor carrying the same shape/dtype as its source.
        grads.append(_Tensor.Ref(
            name=w.name + '_grad',
            shape=w.shape, dtype=w.dtype))
    # A single wrt unwraps to a single gradient tensor.
    if len(grads) == 1: return grads[0]
    return grads
        # NOTE(review): fragment of a method body (the enclosing `def` is
        # outside this chunk). Wires each layer's bottom blobs to its tops
        # and maintains the set of dangling net outputs.
        for layer in self._layers:
            bottom = []
            for bottom_name in layer._bottom:
                if not bottom_name in self._blobs:
                    raise RuntimeError('bottom({}) is unknown.'.format(bottom_name))
                bottom.append(self._blobs[bottom_name])
                # A blob consumed as a bottom is no longer a net output.
                if bottom_name in self._net_outputs:
                    self._net_outputs.remove(bottom_name)
            outputs = layer.Setup([blob['data'] for blob in bottom])
            # Normalize to a list so single-top layers index the same way.
            if not isinstance(outputs, (list, tuple)): outputs = [outputs]
            for idx, top in enumerate(layer._top):
                self._blobs[top] = {
                    'data': outputs[idx],
                    'diff': _Tensor.Ref(outputs[idx].name + '_grad'),
                }
                # Every fresh top is provisionally a net output until some
                # later layer consumes it as a bottom.
                self._net_outputs.add(top)
def placeholder(dtype, shape=None, name=None):
    """Create a placeholder tensor to be fed with data at run time.

    Parameters
    ----------
    dtype : dtypes.DType
        The data type. Required and must be a valid tensorflow data type.
    shape : sequence of int, optional
        The optional shape of the tensor.
    name : str, optional
        The optional base name; a dummy name is generated under the
        current name scope either way, defaulting to 'Placeholder'.

    Returns
    -------
    Tensor
        The placeholder tensor.

    Raises
    ------
    TypeError
        If ``dtype`` is not a valid tensorflow data type. This now also
        covers ``dtype=None``, which previously slipped past the guard
        and crashed below at ``dtype.name`` with an AttributeError.
    """
    # Check data type. A plain isinstance test rejects None as well as
    # any non-DType value before `dtype.name` is dereferenced.
    if not isinstance(dtype, dtypes.DType):
        raise TypeError('The dtype should be a valid tensorflow data type.')
    # Construct a tensor from the explicit name. The parentheses around
    # the conditional keep the name-scope prefix in the fallback case
    # (the unparenthesized form discarded it), matching the 'Const'
    # naming path elsewhere in this file.
    return _Tensor.Ref(
        _workspace.GetDummyName(
            _scope.get_default_name_scope() +
            (name if name else 'Placeholder'),
            suffix=':0', domain='Tensor'),
        dtype=dtype.name, shape=shape).Placeholder()
            ','.join([str(dim) for dim in shape]) + '), ' +
            'while feed a value with (' +
            ','.join([str(dim) for dim in value.shape]) + ').')
        # NOTE(review): the three lines above are the tail of a raise for a
        # shape mismatch whose opening is outside this chunk.
        value = value.reshape(shape)
        # Get an available name under the current name scope,
        # defaulting the basename to 'Const'.
        defined_name = \
            _workspace.GetDummyName(
                basename=_scope.get_default_name_scope() +
                (name if name else 'Const'),
                suffix=':0',
                domain='Tensor',
            )
        # Feed into the workspace
        return _Tensor.Ref(
            name=defined_name,
            shape=list(value.shape),
            dtype=str(value.dtype),
        ).set_value(value)