How to use the dragon.vm.torch.tensor.Tensor function in dragon

To help you get started, we've selected a few dragon examples based on popular ways it is used in public projects.
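
For orientation, dragon.vm.torch.Tensor is Dragon's PyTorch-style tensor class. A minimal sketch of constructing and operating on one, assuming a standard Dragon installation whose torch frontend is importable as dragon.vm.torch:

import dragon.vm.torch as torch

# Build a 2 x 3 tensor from shape dimensions, as the snippets below do.
x = torch.Tensor(2, 3)
x.fill_(1.0)       # in-place fill (patched onto Tensor in ops/tensor.py)
y = x.add(2.0)     # out-of-place elementwise add
x.mul_(0.5)        # in-place elementwise multiply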

github seetaresearch/Dragon: Dragon/python/dragon/vm/torch/optim/optimizer.py
def __init__(self, params, defaults):
        self.defaults = defaults
        if isinstance(params, _Tensor):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Variables or dicts, but got " +
                            str(type(params)))
        self.state = defaultdict(dict)
        self.param_groups = []
        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]
        for param_group in param_groups:
            self.add_param_group(param_group)
        self._update_type = None
        self._allow_parallel = False
        if _mpi.Is_Init():
            rank, _ = _mpi.AllowParallel()
            # Assumed completion of the truncated excerpt:
            # a non-negative rank means this process joined a parallel group.
            if rank != -1:
                self._allow_parallel = True
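
In practice, the constructor accepts either a plain iterable of parameters or a list of group dicts; both paths funnel into add_param_group. A hedged usage sketch (SGD is assumed to be the usual dragon.vm.torch.optim subclass, and model any module defined elsewhere):

import dragon.vm.torch as torch

# 1) A bare iterable is wrapped into a single group: [{'params': [...]}].
opt = torch.optim.SGD(model.parameters(), lr=0.01)

# 2) Explicit groups pass straight through to add_param_group().
opt = torch.optim.SGD(
    [{'params': model.parameters(), 'lr': 0.001}],
    lr=0.01)
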
github seetaresearch/Dragon: Dragon/python/dragon/vm/torch/optim/optimizer.py
def _get_grad(self, param, accumulating=False):
        grad_name = param.name + (
            '_grad[acc]' if accumulating else '_grad')
        if _workspace.HasTensor(grad_name):
            return _Tensor(
                name=grad_name,
                own_storage=False,
                device=param.device)
        return None
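
The helper resolves gradients purely by name: a parameter's gradient lives in the global workspace under '<name>_grad', with a '_grad[acc]' suffix for the accumulation buffer, and the returned tensor is a non-owning view of that storage. A small sketch of the naming scheme (the parameter name is hypothetical):

param_name = 'conv1/weight'            # hypothetical parameter name
grad_name = param_name + '_grad'       # plain gradient buffer
acc_name = param_name + '_grad[acc]'   # gradient-accumulation buffer
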
github seetaresearch/Dragon: Dragon/python/dragon/vm/torch/autograd/variable.py
        _tensor_utils.FromArray(gradient.numpy(True), self.name + '_grad')
        input_grads.append(self.name + '_grad')

    # 3) Dispatch the backward ops
    _backward_impl(forward_ops, targets, input_grads, ignored_grads)

    # 4) Release resources
    # We should release both the operator handles and tensors
    for forward_op in forward_ops:
        _get_operator_pool().put(forward_op.name)
        for output in forward_op.output:
            if output not in forward_op.input:
                _get_tensor_pool().put(output)


_Tensor.backward = backward
_Tensor.volatile = volatile
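
With the patch applied, backward() is available on every tensor: it feeds any explicit gradient into the workspace, replays the recorded forward ops in reverse, and recycles operator handles and intermediate outputs back into their pools. A hedged sketch, assuming torch.ones and requires_grad behave as in the PyTorch-style frontend:

import dragon.vm.torch as torch

x = torch.ones(2, 2, requires_grad=True)
y = x.mul(3.0)
y.backward(torch.ones(2, 2))   # gradient fed as '<name>_grad', ops dispatched
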
github seetaresearch/Dragon: Dragon/python/dragon/vm/torch/nn/modules/rnn.py
                layer_input_size = self.input_size if layer == 0 \
                    else self.hidden_size * self.num_directions
                w_ih_shape = [gate_size, layer_input_size]
                w_hh_shape = [gate_size, self.hidden_size]
                b_ih_shape, b_hh_shape = [gate_size], [gate_size]
                # W (0 ~ 3), R (4 ~ 7)
                self._matrix_shape.extend([w_ih_shape, w_hh_shape])
                # Bw (0 ~ 3), Br (4 ~ 7)
                self._bias_shape.extend([b_ih_shape, b_hh_shape])

        # 2. Compute total number of parameters
        self._weights_count = 0
        for shape in self._matrix_shape + self._bias_shape:
            self._weights_count += numpy.prod(shape)

        # 3. Register the packed weights
        self.weights = Parameter(Tensor(int(self._weights_count)))

        # 4. Create the initialization grids
        if self.mode == 'lstm': num_params_per_layer = 8
        elif self.mode == 'gru': num_params_per_layer = 6
        else: num_params_per_layer = 2
        self._matrix_init_grids = [
            [['orthogonal' for _ in range(num_params_per_layer)]
                for _ in range(self.num_directions)]
            for _ in range(self.num_layers)
        ]
        self._bias_init_grids = [
            [['zero' for _ in range(num_params_per_layer)]
                for _ in range(self.num_directions)]
            for _ in range(self.num_layers)
        ]
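
The packed-weight count is easy to reproduce by hand: for an LSTM, gate_size is 4 * hidden_size, and each layer/direction contributes one W, one R, and two biases. A quick check for a single-layer, unidirectional LSTM with illustrative sizes:

import numpy

input_size, hidden_size = 8, 16
gate_size = 4 * hidden_size        # LSTM stacks the i/f/g/o gate blocks

shapes = [
    [gate_size, input_size],       # w_ih
    [gate_size, hidden_size],      # w_hh
    [gate_size],                   # b_ih
    [gate_size],                   # b_hh
]
weights_count = sum(int(numpy.prod(s)) for s in shapes)
print(weights_count)               # 512 + 1024 + 64 + 64 = 1664
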
github seetaresearch/Dragon: Dragon/python/dragon/vm/torch/ops/primitive.py
def WrapScalar(scalar, dtype, device):
    # Scalars are cached under a (dtype, value) key;
    # re-wrapping the same dtype and value reuses the existing
    # workspace tensor instead of reconstructing it.
    if 'float' in dtype: scalar = float(scalar)
    if 'int' in dtype: scalar = int(scalar)
    name = '/share/scalar/{}/{}'.format(dtype, str(scalar))
    if not _workspace.HasTensor(name):
        _workspace.FeedTensor(name, numpy.array(scalar, dtype=dtype))
    t = _Tensor(name=name, dtype=dtype, device=device, own_storage=False)
    t.requires_grad = False
    return t
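
Because the cache key combines dtype and value, wrapping the same scalar twice feeds the workspace only once and returns tensors aliasing the same storage. For example (dev stands for any valid device handle):

a = WrapScalar(2, 'float32', dev)   # feeds '/share/scalar/float32/2.0'
b = WrapScalar(2, 'float32', dev)   # cache hit: no second FeedTensor call
assert a.name == b.name == '/share/scalar/float32/2.0'
assert a.requires_grad is False
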
github seetaresearch/Dragon: Dragon/python/dragon/vm/torch/module.py
def load_state_dict(self, state_dict, strict=True, verbose=True):
        if verbose: _logging.info('Load the state dict.')
        unexpected = []
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                state_shape = own_state[name].shape
                param_shape = param.shape
                if state_shape != param_shape:
                    raise ValueError('Size of state ({}) is ({}),\n'
                        'while loading from size ({}).'.format(name,
                        ', '.join([str(d) for d in state_shape]),
                        ', '.join([str(d) for d in param_shape])))
                if isinstance(param, Tensor):
                    own_state[name].copy_(param)
                elif isinstance(param, numpy.ndarray):
                    _tensor_utils.SetArray(own_state[name], param)
                else:
                    raise ValueError('Expected the type of source state to be either '
                        'dragon.vm.torch.Tensor or numpy.ndarray, got {}.'.format(type(param)))
                if verbose:
                    _logging.info('Tensor({}) loaded, Size: ({})'.format(name,
                            ', '.join([str(d) for d in param_shape])))
            else:
                unexpected.append(name)
        if strict:
            missing = set(own_state.keys()) - set(state_dict.keys())
            error_msg = ''
            if len(unexpected) > 0:
                error_msg += 'Unexpected key(s) in state_dict: {}.\n'.format(
                    ', '.join(unexpected))
            # Assumed completion of the truncated excerpt:
            # report missing keys likewise, then raise if anything mismatched.
            if len(missing) > 0:
                error_msg += 'Missing key(s) in state_dict: {}.\n'.format(
                    ', '.join(missing))
            if error_msg:
                raise KeyError(error_msg)
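
A typical call site mirrors PyTorch: deserialize a state dict and hand it over, passing strict=False when checkpoint and model may disagree on keys. A hedged sketch, assuming the torch-style torch.load serializer and a model built elsewhere:

import dragon.vm.torch as torch

state = torch.load('checkpoint.pth')          # illustrative path
model.load_state_dict(state)                  # strict: key mismatches raise
model.load_state_dict(state, strict=False)    # tolerate unexpected/missing keys
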
github seetaresearch/Dragon: Dragon/python/dragon/vm/torch/nn/modules/rnn.py
def __init__(self, input_size, hidden_size, bias, num_chunks):
        super(RNNCellBase, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.weight_ih = Parameter(Tensor(num_chunks * hidden_size, input_size))
        self.weight_hh = Parameter(Tensor(num_chunks * hidden_size, hidden_size))
        if bias:
            self.bias_ih = Parameter(Tensor(num_chunks * hidden_size))
            self.bias_hh = Parameter(Tensor(num_chunks * hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)
        self.reset_parameters()
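
Here num_chunks is the number of stacked gate blocks, so an LSTM cell passes 4 and a GRU cell 3. For input_size=10 and hidden_size=20 with num_chunks=4, the shapes work out as follows (LSTMCell is assumed to be the subclass defined alongside this base):

cell = LSTMCell(10, 20)
# weight_ih: [4 * 20, 10] -> [80, 10]
# weight_hh: [4 * 20, 20] -> [80, 20]
# bias_ih, bias_hh: [80] each
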
github seetaresearch/Dragon: Dragon/python/dragon/vm/torch/ops/tensor.py
    gt, lt, eq, ne, ge, le,
    where, nonzero,
)


def _type_to(input, dtype='float32', inplace=False):
    if dtype == input.dtype: return input
    dev = MakeDevice(inputs=[input])
    key = 'Cast/{}/dtype:{}/inplace:{}'.format(
        dev, dtype, 'true' if inplace else 'false')
    module = get_module(Cast, key, dev, dtype=dtype, inplace=inplace)
    return module.forward(input)


Tensor.fill_ = lambda self, value: _fill(self, self.shape, value)
Tensor.masked_fill_ = lambda *args, **kwargs: _masked_assign(*args, **kwargs)
Tensor.uniform_ = lambda self, low=0, high=1: _uniform(self, self.shape, low, high)
Tensor.normal_ = lambda self, mean=0, std=1: _normal(self, self.shape, mean, std)
Tensor.multinomial = lambda *args, **kwargs: multinomial(*args, **kwargs)


Tensor.add = lambda self, value: _fundamental(self, value, 'Add')
Tensor.add_ = lambda self, value: _fundamental(self, value, 'Add', self)
Tensor.__radd__ = lambda self, value: _rfundamental(self, value, 'RAdd')
Tensor.sub = lambda self, value: _fundamental(self, value, 'Sub')
Tensor.sub_ = lambda self, value: _fundamental(self, value, 'Sub', self)
Tensor.__rsub__ = lambda self, value: _rfundamental(self, value, 'RSub')
Tensor.mul = lambda self, value: _fundamental(self, value, 'Mul')
Tensor.mul_ = lambda self, value: _fundamental(self, value, 'Mul', self)
Tensor.__rmul__ = lambda self, value: _rfundamental(self, value, 'RMul')
Tensor.div = lambda self, value: _fundamental(self, value, 'Div')
Tensor.div_ = lambda self, value: _fundamental(self, value, 'Div', self)
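
Together these patches give the class its familiar arithmetic surface: named methods for out-of-place ops, trailing-underscore variants that write into self, and __r*__ hooks for scalar left operands. A short sketch:

import dragon.vm.torch as torch

a = torch.Tensor(2, 2)
a.fill_(3.0)       # in-place fill
b = a.add(1.0)     # out-of-place: _fundamental(self, value, 'Add')
a.mul_(2.0)        # in-place: same op, with self as the output
c = 1.0 - a        # float lhs falls back to __rsub__ -> 'RSub'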