How to use the dragon.vm.tensorflow.framework.ops module in dragon

To help you get started, we’ve selected a few dragon examples based on popular ways it is used in public projects. The snippets below all come from the seetaresearch/Dragon repository and exercise the module’s collection and graph utilities: ops.GraphKeys, ops.get_collection, ops.add_to_collection, ops.convert_to_tensor, and ops.get_default_graph.

github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / ops / variables.py View on Github
    def __init__(
        self,
        initial_value=None,
        trainable=True,
        collections=None,
        validate_shape=True,
        name=None,
        dtype=None,
        regularizer=None,
        **kwargs
    ):
        super(Variable, self).__init__()

        if initial_value is None:
            raise ValueError('initial_value must be specified.')

        if collections is None:
            collections = [ops.GraphKeys.GLOBAL_VARIABLES]

        if not isinstance(collections, (list, tuple, set)):
            raise ValueError(
                'collections argument to Variable constructor must be a list, tuple, '
                'or set. Got the type {}'.format(type(collections)))

        if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
            collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]

        if name is not None:
            # Get a known name from the name scope
            defined_name = _scope.get_default_name_scope() + name
        else:
            if 'name_from_variable_scope' in kwargs:
                # Has a name from the variable scope
                defined_name = kwargs['name_from_variable_scope']
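This constructor leans on ops.GraphKeys for the standard collection keys. A minimal standalone sketch of the same collection bookkeeping, assuming only that the framework ops module is importable under the path in this page's title:

from dragon.vm.tensorflow.framework import ops

collections = None
trainable = True

# Default to the global variables collection, then make sure trainable
# variables are also tracked under TRAINABLE_VARIABLES.
if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
    collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]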
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / layers / base.py View on Github
    def __call__(self, inputs, *args, **kwargs):
        with vs.variable_scope(self.name,
            reuse=self.built or self._reuse) as scope:
            if not self.built:
                input_shapes = [x.get_shape() for x in nest.flatten(inputs)]
                if len(input_shapes) == 1: self.build(input_shapes[0])
                else: self.build(input_shapes)
            outputs = self.call(inputs, *args, **kwargs)
            # Update global default collections.
            _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
            return outputs
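The last line registers layer update ops under ops.GraphKeys.UPDATE_OPS. The _add_elements_to_collection helper is private and not shown on this page; here is one plausible sketch of it, built only on the ops.add_to_collection and ops.get_collection calls that appear in the other snippets:

from dragon.vm.tensorflow.framework import ops

def add_elements_to_collection(elements, name):
    # Hypothetical stand-in for the private helper: register each
    # element once, skipping ones already in the collection.
    existing = ops.get_collection(name)
    for element in elements:
        if element not in existing:
            ops.add_to_collection(name, element)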
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / contrib / framework / ops / variables.py View on Github
def get_variables(
    scope=None,
    suffix=None,
    collection=ops.GraphKeys.GLOBAL_VARIABLES,
):
    if isinstance(scope, variable_scope.VariableScope):
        scope = scope.name
    if suffix is not None:
        scope = (scope or '') + '.*' + suffix
    return ops.get_collection(collection, scope)
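Note how scope and suffix are joined into a name pattern (scope + '.*' + suffix) that ops.get_collection matches against. A usage sketch, assuming get_variables is importable from the module in the snippet header and that a 'conv1' scope exists in your graph:

from dragon.vm.tensorflow.contrib.framework.ops.variables import get_variables
from dragon.vm.tensorflow.framework import ops

# All trainable variables created under the (hypothetical) 'conv1' scope.
conv1_weights = get_variables(
    scope='conv1',
    collection=ops.GraphKeys.TRAINABLE_VARIABLES,
)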
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / training / learning_rate_decay.py View on Github
def cosine_decay_restarts(
    learning_rate,
    global_step,
    first_decay_steps,
    t_mul=2.0,
    m_mul=1.0,
    alpha=0.0,
    name=None,
):
    lr = _RunOp(
        inputs=[ops.convert_to_tensor(global_step)],
        module=__name__,
        op='_CosineDecayRestarts',
        param_str=str({
            'learning_rate': learning_rate,
            'first_decay_steps': first_decay_steps,
            't_mul': t_mul,
            'm_mul': m_mul,
            'alpha': alpha
        }),
        name=name,
    )
    lr.set_value(numpy.array(learning_rate, dtype='float32'))
    return lr
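Because global_step is passed through ops.convert_to_tensor, any tensor-convertible step counter works. A usage sketch with illustrative hyperparameters (the step setup is an assumption, not part of the snippet):

from dragon.vm.tensorflow.framework import ops

global_step = ops.convert_to_tensor(0)  # hypothetical step counter
lr = cosine_decay_restarts(
    learning_rate=0.1,
    global_step=global_step,
    first_decay_steps=1000,
    t_mul=2.0,   # each restart period doubles in length
    m_mul=0.5,   # each restart begins at half the previous peak rate
    alpha=0.0,   # decay toward zero within each period
)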
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / training / optimizer.py View on Github
    def compute_gradients(self, loss, var_list=None, **kwargs):
        if var_list is None:
            var_list = variables.trainable_variables() + \
                ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)
        grads = gradients(loss, var_list)
        grads_and_vars = list(zip(grads, var_list))
        return grads_and_vars
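When var_list is omitted, gradients are taken with respect to every trainable variable plus anything registered under ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES. A usage sketch, assuming Dragon's TensorFlow shim mirrors the tf.train optimizer API (the parameter and loss below are illustrative):

from dragon.vm import tensorflow as tf

w = tf.Variable(tf.ones([4, 1]))  # hypothetical parameter
loss = tf.reduce_sum(w * w)       # hypothetical loss

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
# var_list defaults to trainable_variables() plus the
# TRAINABLE_RESOURCE_VARIABLES collection, per the snippet above.
grads_and_vars = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(grads_and_vars)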
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / layers / base.py View on Github
    def __init__(
        self,
        trainable=True,
        name=None,
        dtype=dtypes.float32,
        **kwargs
    ):
        allowed_kwargs = {'_scope', '_reuse'}
        for kwarg in kwargs:
            if kwarg not in allowed_kwargs:
                raise TypeError('Keyword argument not understood:', kwarg)

        self.trainable = trainable
        self.built = False
        self._trainable_weights = []
        self._non_trainable_weights = []
        self._updates = []
        self._losses = []
        self._reuse = kwargs.get('_reuse')
        self._graph = ops.get_default_graph()
        self._per_input_losses = {}
        self._per_input_updates = {}
        self.dtype = dtypes.as_dtype(dtype)
        self.input_spec = None

        # Determine layer name
        if name is None:
            base_name = _to_snake_case(self.__class__.__name__)
            self.name = _unique_layer_name(base_name)
        else:
            base_name = name
            self.name = name

        self._base_name = base_name
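The constructor pins the layer to the current default graph via ops.get_default_graph() and, when no name is given, derives one from the class name. The private _to_snake_case helper is not shown; a hypothetical equivalent of that conversion:

import re

def to_snake_case(name):
    # Hypothetical stand-in for _to_snake_case: insert underscores at
    # word boundaries, then lowercase.
    s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower()

to_snake_case('BatchNorm')  # 'batch_norm'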
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / layers / base.py View on Github
def _unique_layer_name(name):
    global PER_GRAPH_LAYER_NAME_UIDS
    graph = ops.get_default_graph()
    if graph not in PER_GRAPH_LAYER_NAME_UIDS:
        PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict(int)
    layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS[graph]
    layer_name_uids[name] += 1
    return name + '_' + str(layer_name_uids[name])
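Uniquing is per graph: the UID table is keyed by ops.get_default_graph(), so two graphs can each own a 'dense_1'. (The surrounding module must also import collections for the defaultdict.) Expected behavior within one graph:

_unique_layer_name('dense')  # 'dense_1' on the first call
_unique_layer_name('dense')  # 'dense_2' on the next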
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / ops / losses.py View on Github
def softmax_cross_entropy(
    onehot_labels,
    logits,
    weights=1.,
    label_smoothing=0,
    scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS,
):
    if onehot_labels is None: raise ValueError("onehot_labels must not be None.")
    if logits is None: raise ValueError("logits must not be None.")
    normalization = None
    if reduction == Reduction.NONE: normalization = 'UNIT'
    elif reduction == Reduction.MEAN: normalization = 'FULL'
    elif reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or \
            reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS:
        normalization = 'NONE'
    elif reduction == Reduction.SUM_OVER_BATCH_SIZE:
        normalization = 'BATCH_SIZE'
    loss = _ops.SoftmaxCrossEntropy(
        [logits, onehot_labels],
        normalization=normalization,
        name=scope,
    )
    # The remainder mirrors the sparse variant below: apply the weight
    # scale, register the loss, and return it.
    if weights != 1.0: loss = weights * loss
    ops.add_to_collection(loss_collection, loss)
    return loss
github seetaresearch / Dragon / Dragon / python / dragon / vm / tensorflow / ops / losses.py View on Github
def sparse_softmax_cross_entropy(
    labels,
    logits,
    weights=1.,
    scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS,
):
    # Signature mirrors softmax_cross_entropy above.
    if labels is None: raise ValueError("labels must not be None.")
    if logits is None: raise ValueError("logits must not be None.")
    normalization = None
    if reduction == Reduction.NONE: normalization = 'UNIT'
    elif reduction == Reduction.MEAN: normalization = 'FULL'
    elif reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or \
            reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS:
        normalization = 'NONE'
    elif reduction == Reduction.SUM_OVER_BATCH_SIZE:
        normalization = 'BATCH_SIZE'
    loss = _ops.SparseSoftmaxCrossEntropy(
        [logits, labels],
        normalization=normalization,
        name=scope,
    )
    if weights != 1.0: loss = weights * loss
    ops.add_to_collection(loss_collection, loss)
    return loss
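Both loss functions hand their result to ops.add_to_collection, so the total objective can be assembled later from the collection. A minimal sketch, assuming at least one loss has already been registered:

from dragon.vm.tensorflow.framework import ops

# Gather every loss registered under GraphKeys.LOSSES and sum them
# into a single training objective.
all_losses = ops.get_collection(ops.GraphKeys.LOSSES)
total_loss = all_losses[0]
for extra in all_losses[1:]:
    total_loss = total_loss + extra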