How to use the dragon.core.workspace.FeedTensor function in dragon

To help you get started, we've selected a few dragon examples that illustrate popular ways dragon.core.workspace.FeedTensor is used in public projects.

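All of the snippets below revolve around the same call: FeedTensor takes a tensor name and a value, and FetchTensor reads the value back. Here is a minimal sketch of that round trip against the default workspace; the tensor name and values are illustrative.

import numpy as np
import dragon.core.workspace as ws

# Feed a numpy array into the default workspace under a chosen name.
ws.FeedTensor('demo/x', np.array([1.0, 2.0], dtype=np.float32))

# Read the value back as a numpy array.
print(ws.FetchTensor('demo/x'))  # [1. 2.]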

github seetaresearch / Dragon / Dragon / python / dragon / vm / theano / core / function.py
def GraphDef_Update(graph_def, updater):
    """ generate all update targets for CC Graph """
    if updater is None: return

    updater._prefix = graph_def.name + '_'
    extra_kwargs = updater._extra_kwargs
    extra_kwargs['domain'] = updater._prefix

    # wrap hyper-parameters as Tensor for CC
    for k, v in updater._hyper_params.items():
        ws.FeedTensor(updater._prefix + k, np.array([v], dtype=np.float32))

    # check data parallel if necessary
    if mpi.is_init():
        idx, group = mpi.allow_parallel()
        if idx != -1:
            extra_kwargs['comm'], extra_kwargs['group'] \
                = mpi.group(root=group[0], incl=group)
            extra_kwargs['root'] = group[0]
            extra_kwargs['mode'] = mpi.get_parallel_mode()
            extra_kwargs['group_size'] = len(group)

    for tensors, kwargs in updater._tuples:
        kwargs = dict(kwargs, **extra_kwargs)
        u_target = pb.UpdateTarget()
        u_target.type = updater._type
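
For illustration, here is a minimal sketch of the hyper-parameter feeding step above in isolation. The graph name and the hyper-parameters 'base_lr' and 'momentum' are assumptions, not taken from the snippet.

import numpy as np
import dragon.core.workspace as ws

prefix = 'Graph_1_'  # graph_def.name + '_'
hyper_params = {'base_lr': 0.01, 'momentum': 0.9}

# Each scalar hyper-parameter becomes a 1-element float32 tensor,
# namespaced by the graph prefix, so the CC updater can read it.
for k, v in hyper_params.items():
    ws.FeedTensor(prefix + k, np.array([v], dtype=np.float32))
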
github seetaresearch / Dragon / Dragon / python / dragon / vm / onnx / helper.py
    graph_def.arg[i].i = 0

    # Create an anonymous workspace
    ws = _workspace.Workspace()

    with ws.as_default():
        # Register all the initializers before feeding them
        for name in initializer:
            _Tensor(name=name).Variable()

        # Feed the given values if necessary
        if init_func: init_func()

        # Feed the external inputs
        for name, blob in inputs.items():
            _workspace.FeedTensor(name, blob)

        # Create and run the graph
        graph_name = _workspace.CreateGraph(graph_def)
        _workspace.RunGraph(graph_name, return_outputs=False)

        # Fetch the outputs
        output_names = graph_def.output
        output_values = [_workspace.FetchTensor(name) for name in output_names]

        # Fetch the initializers
        initializer = [
            numpy_helper.from_array(
                _workspace.FetchTensor(name), name=name)
            for name in initializer
        ]
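
Distilled from the snippet above, the scoped-workspace pattern on its own: feeds, graph runs, and fetches inside the `with` block all target the anonymous workspace instead of the default one. This sketch assumes a valid `graph_def` whose graph reads an external input 'x' and produces an output 'y'.

import numpy as np
import dragon.core.workspace as _workspace

ws = _workspace.Workspace()      # an anonymous, isolated workspace
with ws.as_default():            # route feeds/fetches into it
    _workspace.FeedTensor('x', np.ones((2, 3), dtype=np.float32))
    graph_name = _workspace.CreateGraph(graph_def)
    _workspace.RunGraph(graph_name, return_outputs=False)
    y = _workspace.FetchTensor('y')
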
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / training / learning_rate_decay.py
def set(self, tensor, value, dtype=None):
    _workspace.FeedTensor(
        tensor, value, dtype=dtype, force_cpu=True)
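
A usage sketch for a setter like this one; the tensor name is an assumption, and force_cpu=True keeps the fed scalar in host memory.

import dragon.core.workspace as _workspace

# Overwrite an (assumed) learning-rate tensor with a new value.
_workspace.FeedTensor('optimizer/lr', 0.001, dtype='float32', force_cpu=True)
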
github seetaresearch / Dragon / Dragon / python / dragon / vm / torch / optim / optimizer.py
def feed_parameters(self, group):
    template = group['slot'] + '/{}'
    for k, v in group.items():
        if k in self._mutable_parameters:
            _workspace.FeedTensor(
                template.format(self._mutable_parameters[k]),
                v, dtype='float32', force_cpu=True)
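
To see the naming scheme, here is a pure-Python sketch with assumed values; the slot name and parameter map are illustrative, and it only prints the tensor names feed_parameters would feed.

group = {'slot': 'SGD/Slot:0', 'lr': 0.01, 'momentum': 0.9}
mutable_parameters = {'lr': 'base_lr', 'momentum': 'momentum'}

template = group['slot'] + '/{}'
for k, v in group.items():
    if k in mutable_parameters:
        # e.g. 'SGD/Slot:0/base_lr' <- 0.01
        print(template.format(mutable_parameters[k]), '<-', v)
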
github seetaresearch / Dragon / Dragon / python / dragon / vm / torch / ops / primitive.py
def WrapScalar(scalar, dtype, device):
    # We use (dtype + value) to hash different scalars;
    # re-feeding a tensor with the same dtype and shape will not reallocate it.
    if 'float' in dtype: scalar = float(scalar)
    if 'int' in dtype: scalar = int(scalar)
    name = '/share/scalar/{}/{}'.format(dtype, str(scalar))
    if not _workspace.HasTensor(name):
        _workspace.FeedTensor(name, numpy.array(scalar, dtype=dtype))
    t = _Tensor(name=name, dtype=dtype, device=device, own_storage=False)
    t.requires_grad = False
    return t
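
A usage sketch for the scalar cache, assuming a valid `device` descriptor is in scope: wrapping the same value and dtype twice reuses one workspace tensor.

a = WrapScalar(2.0, 'float32', device)
b = WrapScalar(2.0, 'float32', device)
assert a.name == b.name          # both map to '/share/scalar/float32/2.0'
assert a.requires_grad is False  # wrapped scalars never require grad
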
github seetaresearch / Dragon / Dragon / python / dragon / operators / custom / minibatch.py
        Parameters
        ----------
        inputs : sequence of str
            The names of the inputs.
        outputs : sequence of str
            The names of the outputs.

        Returns
        -------
        None

        """
        blobs = self._data_batch.get()
        for idx, blob in enumerate(blobs):
            _workspace.FeedTensor(outputs[idx], blob)
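
The same feeding loop with plain numpy blobs; the output names and blob shapes here are assumptions for illustration.

import numpy as np
import dragon.core.workspace as _workspace

blobs = [
    np.zeros((64, 3, 224, 224), dtype='float32'),  # images
    np.zeros((64,), dtype='int64'),                # labels
]
outputs = ['data', 'label']

for idx, blob in enumerate(blobs):
    _workspace.FeedTensor(outputs[idx], blob)
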
github seetaresearch / Dragon / Dragon / python / dragon / vm / theano / core / function.py
    all_exprs = sorted(all_exprs.items(), key=lambda d: d[0])
    forward_ops = copy.deepcopy([v for k, v in all_exprs])

    # handle swap
    if swaps is not None:
        name_dict = {}
        external_input_exprs = {}

        for old_tensor, new_tensor in swaps.items():
            if isinstance(new_tensor, Tensor):
                name_dict[old_tensor.name] = new_tensor._name
                if sys.version_info >= (3, 0):
                    external_input_exprs = OrderedDict(external_input_exprs, **new_tensor.expressions)
                else:
                    external_input_exprs = dict(external_input_exprs, **new_tensor.expressions)
            elif isinstance(new_tensor, np.ndarray):
                # Feed the raw array under a freshly generated name
                ws.FeedTensor(GetTensorName(), new_tensor)
        external_input_ops = [v for k, v in external_input_exprs.items()]
        for op in forward_ops:
            op.input.extend([name_dict[name] if name in name_dict
                             else name for name in op.input])
            del op.input[:len(op.input) // 2]

        forward_ops = external_input_ops + forward_ops

    # handle grads
    if existing_grads:
        targets = [output.name for output in outputs]
        forward_ops, grad_ops = GraphGradientMaker.Make(forward_ops, targets)
    else: grad_ops = []
    graph_def.op.extend(forward_ops + grad_ops)

    if len(outputs) > 0:
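
One detail worth isolating from the swap handling above: when a swap value is a Tensor, only names are remapped, but when it is a raw numpy array, the array is fed into the workspace under a freshly generated anonymous name. Here is a minimal sketch of that branch, assuming GetTensorName returns a unique name as in the snippet.

import numpy as np
import dragon.core.workspace as ws

new_value = np.ones((4, 4), dtype=np.float32)
anon_name = GetTensorName()          # fresh anonymous tensor name
ws.FeedTensor(anon_name, new_value)  # the name comes first, then the value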