How to use the tensorpack.tfutils.common.get_tf_version_tuple function in tensorpack

To help you get started, we've selected a few tensorpack examples based on popular ways the function is used in public projects. get_tf_version_tuple() parses the installed TensorFlow version into a tuple of integers (for example, '1.13.1' becomes (1, 13)), so feature gates become plain tuple comparisons instead of fragile string or float checks.
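As a minimal sketch of the pattern, assuming TensorFlow 1.x and tensorpack are installed:

from tensorpack.tfutils.common import get_tf_version_tuple

# The first two version components, as ints: '1.13.1' -> (1, 13).
version = get_tf_version_tuple()
print(version)

# Feature gates become ordinary tuple comparisons:
if version >= (1, 12):
    print('variance_scaling_initializer accepts untruncated_normal here')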

From armandmcqueen/tensorpack-mask-rcnn (tensorpack/models/fc.py):
def FullyConnected(inputs, units, activation=None, use_bias=True,
        kernel_initializer=None,
        bias_initializer=tf.zeros_initializer(),
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None):
    """
    A wrapper around `tf.layers.Dense`.
    One difference to maintain backward-compatibility:
    Default weight initializer is variance_scaling_initializer(2.0).

    Variable Names:

    * ``W``: weights of shape [in_dim, out_dim]
    * ``b``: bias
    """
    if kernel_initializer is None:
        if get_tf_version_tuple() <= (1, 12):
            kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0)
        else:
            kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')

    inputs = batch_flatten(inputs)
    with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
        layer = tf.layers.Dense(
            units=units,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            _reuse=tf.get_variable_scope().reuse)
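
The version gate above can be distilled into a standalone helper; a sketch under the same TF 1.x assumptions (msra_initializer is an illustrative name, not a tensorpack API):

import tensorflow as tf
from tensorpack.tfutils.common import get_tf_version_tuple

def msra_initializer(scale=2.0):
    # Up to TF 1.12 the initializer lives in tf.contrib; later 1.x
    # releases expose an equivalent via tf.keras.initializers.
    if get_tf_version_tuple() <= (1, 12):
        return tf.contrib.layers.variance_scaling_initializer(scale)
    return tf.keras.initializers.VarianceScaling(
        scale, distribution='untruncated_normal')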
From armandmcqueen/tensorpack-mask-rcnn (MaskRCNN_no_batch/model_mrcnn.py):
"""
    Args:
        feature (NxCx s x s): size is 7 in C4 models and 14 in FPN models.
        num_category(int):
        num_convs (int): number of convolution layers
        norm (str or None): either None or 'GN'

    Returns:
        mask_logits (N x num_category x 2s x 2s):
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope([Conv2D, Conv2DTranspose], data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out',
                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
        # c2's MSRAFill is fan_out
        for k in range(num_convs):
            l = Conv2D('fcn{}'.format(k), l, cfg.MRCNN.HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = Conv2DTranspose('deconv', l, cfg.MRCNN.HEAD_DIM, 2, strides=2, activation=tf.nn.relu)
        l = Conv2D('conv', l, num_category, 1)
    return l
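
The distribution argument is gated because 'untruncated_normal' only became a valid choice for variance scaling initializers in TF 1.12; earlier releases accept only 'normal'. In isolation (msra_fill_init is an illustrative name):

import tensorflow as tf
from tensorpack.tfutils.common import get_tf_version_tuple

def msra_fill_init():
    # Mirrors Caffe2's MSRAFill: scale 2.0 with fan_out mode.
    dist = 'untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal'
    return tf.variance_scaling_initializer(scale=2.0, mode='fan_out',
                                           distribution=dist)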
From tensorpack/tensorpack (examples/FasterRCNN/modeling/model_frcnn.py):
"""
    Args:
        feature (NCHW):
        num_classes (int): num_category + 1
        num_convs (int): number of conv layers
        norm (str or None): either None or 'GN'

    Returns:
        2D head feature
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out',
                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
    return l
From tensorpack/tensorpack (tensorpack/graph_builder/training.py):
        Returns:
            (tf.Operation, tf.Operation)

            1. the training op.

            2. the op which syncs variables from GPU 0 to the other GPUs.
               It has to be run once before training starts.
               You can optionally run it again later to sync non-trainable variables.
        """
        assert len(grad_list) == len(self.towers)
        raw_devices = ['/gpu:{}'.format(k) for k in self.towers]

        DataParallelBuilder._check_grad_list(grad_list)

        dtypes = {x[0].dtype.base_dtype for x in grad_list[0]}
        dtypes_nccl_supported = [tf.float32, tf.float64]
        if get_tf_version_tuple() >= (1, 8):
            dtypes_nccl_supported.append(tf.float16)
        valid_for_nccl = all(k in dtypes_nccl_supported for k in dtypes)
        if self._mode == 'nccl' and not valid_for_nccl:
            logger.warn("Cannot use mode='nccl' because some gradients have unsupported types. Fallback to mode='cpu'")
            self._mode = 'cpu'

        if self._mode in ['nccl', 'hierarchical']:
            all_grads, all_vars = split_grad_list(grad_list)
            # use allreduce from tf-benchmarks
            # from .batch_allreduce import AllReduceSpecAlgorithm
            # algo = AllReduceSpecAlgorithm('nccl', list(range(8)), 0, 10)
            # all_grads, warmup_ops = algo.batch_all_reduce(all_grads, 1, True, False)
            # print("WARMUP OPS", warmup_ops)

            if self._mode == 'nccl':
                all_grads = allreduce_grads(all_grads, average=self._average)  # #gpu x #param
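
The dtype check above exists because NCCL all-reduce only gained float16 support around TF 1.8. A reduced sketch (can_use_nccl is an illustrative helper, not tensorpack API):

import tensorflow as tf
from tensorpack.tfutils.common import get_tf_version_tuple

nccl_dtypes = [tf.float32, tf.float64]
if get_tf_version_tuple() >= (1, 8):
    nccl_dtypes.append(tf.float16)   # fp16 all-reduce needs TF >= 1.8

def can_use_nccl(grads_and_vars):
    # grads_and_vars: a list of (gradient, variable) pairs for one tower.
    return all(g.dtype.base_dtype in nccl_dtypes for g, _ in grads_and_vars)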
From tensorpack/tensorpack (tensorpack/tfutils/varreplace.py):
@contextmanager   # the function yields, so it must run as a context manager
def custom_getter_scope(custom_getter):
    """
    Args:
        custom_getter: the same as in :func:`tf.get_variable`

    Returns:
        The current variable scope with a custom_getter.
    """
    scope = tf.get_variable_scope()
    if get_tf_version_tuple() >= (1, 5):
        with tf.variable_scope(
                scope, custom_getter=custom_getter,
                auxiliary_name_scope=False):
            yield
    else:
        ns = tf.get_default_graph().get_name_scope()
        with tf.variable_scope(
                scope, custom_getter=custom_getter):
            with tf.name_scope(ns + '/' if ns else ''):
                yield
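
The (1, 5) gate is there because the auxiliary_name_scope argument of tf.variable_scope first appeared in TF 1.5; older versions need the manual name-scope restoration shown in the else branch. A hypothetical usage sketch:

import tensorflow as tf
from tensorpack.tfutils.varreplace import custom_getter_scope

def logging_getter(getter, name, *args, **kwargs):
    # A trivial custom getter: log each variable request, then defer
    # to the default getter.
    print('requesting variable:', name)
    return getter(name, *args, **kwargs)

with custom_getter_scope(logging_getter):
    w = tf.get_variable('w', shape=[3, 3])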
From armandmcqueen/tensorpack-mask-rcnn (MaskRCNN/model_frcnn.py):
Returns: n booleans, the selection
        """
        prob, box = X
        output_shape = tf.shape(prob, out_type=tf.int64)
        # filter by score threshold
        ids = tf.reshape(tf.where(prob > cfg.TEST.RESULT_SCORE_THRESH), [-1])
        prob = tf.gather(prob, ids)
        box = tf.gather(box, ids)
        # NMS within each class
        #selection = non_max_suppression_custom(
            #box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH)
        selection = tf.image.non_max_suppression(
            box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH)
        selection = tf.gather(ids, selection)

        if get_tf_version_tuple() >= (1, 13):
            sorted_selection = tf.sort(selection, direction='ASCENDING')
            mask = tf.sparse.SparseTensor(indices=tf.expand_dims(sorted_selection, 1),
                                          values=tf.ones_like(sorted_selection, dtype=tf.bool),
                                          dense_shape=output_shape)
            mask = tf.sparse.to_dense(mask, default_value=False)
        else:
            # this function is deprecated by TF
            sorted_selection = -tf.nn.top_k(-selection, k=tf.size(selection))[0]
            mask = tf.sparse_to_dense(
                sparse_indices=sorted_selection,
                output_shape=output_shape,
                sparse_values=True,
                default_value=False)
        return mask
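
The branch exists because tf.sort appeared in TF 1.13, around the same time tf.sparse_to_dense was deprecated. The sort fallback in isolation (a sketch):

import tensorflow as tf
from tensorpack.tfutils.common import get_tf_version_tuple

x = tf.constant([3, 1, 2])
if get_tf_version_tuple() >= (1, 13):
    s = tf.sort(x, direction='ASCENDING')    # available from TF 1.13
else:
    s = -tf.nn.top_k(-x, k=tf.size(x))[0]    # equivalent on older TF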
From armandmcqueen/tensorpack-mask-rcnn (tensorpack/models/tflayer.py):
def monkeypatch_tf_layers():
    if get_tf_version_tuple() < (1, 4):
        if not hasattr(tf.layers, 'Dense'):
            from tensorflow.python.layers.core import Dense
            tf.layers.Dense = Dense

            from tensorflow.python.layers.normalization import BatchNormalization
            tf.layers.BatchNormalization = BatchNormalization

            from tensorflow.python.layers.convolutional import Conv2DTranspose, Conv2D
            tf.layers.Conv2DTranspose = Conv2DTranspose
            tf.layers.Conv2D = Conv2D

            from tensorflow.python.layers.pooling import MaxPooling2D, AveragePooling2D
            tf.layers.MaxPooling2D = MaxPooling2D
            tf.layers.AveragePooling2D = AveragePooling2D
From armandmcqueen/tensorpack-mask-rcnn (tensorpack/tfutils/scope_utils.py):
def wrapper(*args, **kwargs):
        scope = tf.get_variable_scope()
        h = hash((tf.get_default_graph(), scope.name))
        # print("Entering " + scope.name + " reuse: " + str(h in used_scope))
        if h in used_scope:
            if get_tf_version_tuple() >= (1, 5):
                with tf.variable_scope(scope, reuse=True, auxiliary_name_scope=False):
                    return func(*args, **kwargs)
            else:
                ns = tf.get_default_graph().get_name_scope()
                with tf.variable_scope(scope, reuse=True), \
                        tf.name_scope(ns + '/' if ns else ''):
                    return func(*args, **kwargs)
        else:
            used_scope.add(h)
            return func(*args, **kwargs)
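
This wrapper backs tensorpack's auto_reuse_variable_scope decorator: the first call under a given graph and variable scope creates variables, and later calls reuse them. A usage sketch:

import tensorflow as tf
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope

@auto_reuse_variable_scope
def tower_fn(x):
    # First call creates 'dense/kernel'; later calls in the same
    # graph and variable scope silently reuse it.
    return tf.layers.dense(x, 10)

x = tf.placeholder(tf.float32, [None, 4])
y1 = tower_fn(x)   # creates the variables
y2 = tower_fn(x)   # reuses them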
From armandmcqueen/tensorpack-mask-rcnn (MaskRCNN/model/mask_head.py):
num_category (int): total number of categories
        num_convs (int): number of convolution layers
        norm (str or None): either None or 'GN'

    Returns:
        mask_logits: Num_boxes x num_category x (2 * H_roi) x (2 * W_roi)
    """
    assert norm in [None, 'GN'], norm
    l = feature
    if fp16:
        l = tf.cast(l, tf.float16)
    with mixed_precision_scope(mixed=fp16):
      with argscope([Conv2D, Conv2DTranspose], data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out', seed=seed_gen.next(),
                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
        # c2's MSRAFill is fan_out
        for k in range(num_convs):
            l = Conv2D('fcn{}'.format(k), l, cfg.MRCNN.HEAD_DIM, 3, activation=tf.nn.relu, seed=seed_gen.next())
            if norm is not None:
                if fp16: l = tf.cast(l, tf.float32)
                l = GroupNorm('gn{}'.format(k), l)
                if fp16: l = tf.cast(l, tf.float16)
        l = Conv2DTranspose('deconv', l, cfg.MRCNN.HEAD_DIM, 2, strides=2, activation=tf.nn.relu, seed=seed_gen.next()) # 2x upsampling
        l = Conv2D('conv', l, num_category, 1, seed=seed_gen.next())
    if fp16:
        l = tf.cast(l, tf.float32)
    return l