How to use the tensorpack.tfutils.summary.add_moving_summary function in tensorpack

To help you get started, we've selected a few examples that show popular ways tensorpack's add_moving_summary is used in public projects.

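At its core, add_moving_summary takes one or more scalar tensors, maintains an exponential moving average of each, and writes the smoothed values to TensorBoard. It is normally called inside a ModelDesc's build_graph, as every example below does. The following minimal sketch shows the bare pattern; the logits and labels are synthetic stand-ins, not taken from any of the projects below.

import tensorflow as tf
from tensorpack.tfutils.summary import add_moving_summary

# synthetic stand-ins for a model's outputs and targets
logits = tf.random_normal([32, 10])
labels = tf.random_uniform([32], maxval=10, dtype=tf.int32)

# reduce to a scalar: every argument to add_moving_summary must be a scalar tensor
cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels),
    name='cross_entropy_loss')

# register the moving-average summary; the optional decay kwarg (default 0.95)
# controls the smoothing, and decay=0 logs the raw per-step value
add_moving_summary(cost)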

github tensorpack/tensorpack: examples/SimilarityLearning/mnist-embeddings.py (view on GitHub)
def build_graph(self, x, y, label):
        # embed them
        single_input = x
        x, y = self.embed([x, y])

        # tag the embedding of 'input' with name 'emb', just for inference later on
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            tf.identity(self.embed(single_input), name="emb")

        # compute the actual loss
        cost, pos_dist, neg_dist = contrastive_loss(x, y, label, 5., extra=True, scope="loss")
        cost = tf.identity(cost, name="cost")

        # track these values during training
        add_moving_summary(pos_dist, neg_dist, cost)
        return cost

github qq456cvb/doudizhu-C: TensorPack/AutoEncoder/main.py (view on GitHub)
                    if param[-1] == 'identity':
                        conv = identity_block(conv, param[0], param[1])
                    elif param[-1] == 'upsampling':
                        conv = upsample_block(conv, param[0], param[1])
                    elif param[-1] == 'downsampling':
                        conv = downsample_block(conv, param[0], param[1])
                    else:
                        raise Exception('unsupported layer type')
                print(conv.shape)
                decoded = tf.reshape(conv, [-1, conv.shape[1] * conv.shape[2] * conv.shape[3]])

        reconstuct_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.pad(onehot_cards, [[0, 0], [0, 4]]), logits=decoded)
        reconstuct_loss = tf.reduce_mean(tf.reduce_sum(reconstuct_loss, -1), name='reconstruct_loss')
        l2_loss = tf.truediv(regularize_cost_from_collection(), tf.cast(tf.shape(onehot_cards)[0], tf.float32), name='l2_loss')
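        # decay=0 turns off exponential smoothing, so the raw per-step values are logged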
        add_moving_summary(reconstuct_loss, decay=0)
        add_moving_summary(l2_loss, decay=0)
        loss = reconstuct_loss + l2_loss
        return loss

github tensorpack/tensorpack: examples/DoReFa-Net/svhn-digit-dorefa.py (view on GitHub)
.FullyConnected('fc1', 10)())
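        # expose the class probabilities under the name 'output' for inference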
        tf.nn.softmax(logits, name='output')

        # compute the number of failed samples
        wrong = tf.cast(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), tf.float32, name='wrong_tensor')
        # monitor training error
        add_moving_summary(tf.reduce_mean(wrong, name='train_error'))

        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')
        # weight decay on all W of fc layers
        wd_cost = regularize_cost('fc.*/W', l2_regularizer(1e-7))

        add_param_summary(('.*/W', ['histogram', 'rms']))
        total_cost = tf.add_n([cost, wd_cost], name='cost')
        add_moving_summary(cost, wd_cost, total_cost)
        return total_cost

github iamhankai/ghostnet: tensorflow/imagenet_utils.py (view on GitHub)
        if self.data_format == 'NCHW':
            image = tf.transpose(image, [0, 3, 1, 2])

        logits = self.get_logits(image)
        print('self.label_smoothing', self.label_smoothing)
        loss = ImageNetModel.compute_loss_and_error(logits, label, self.label_smoothing)

        if self.weight_decay > 0:
            wd_loss = regularize_cost(self.weight_decay_pattern,
                                      tf.contrib.layers.l2_regularizer(self.weight_decay),
                                      name='l2_regularize_loss')
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name='cost')
        else:
            total_cost = tf.identity(loss, name='cost')
            add_moving_summary(total_cost)

        if self.loss_scale != 1.:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost

github vqdang/hover_net: src/model/unet.py (view on GitHub)
            else:
                prob_np = tf.identity(soft[...,1], name='predmap-prob')
                prob_np = tf.expand_dims(prob_np, axis=-1)
                predmap_coded = prob_np

            # * channel ordering: type-map, segmentation map
            # encoded so that inference can extract all output at once
            predmap_coded = tf.identity(predmap_coded, name='predmap-coded')

        ####
        if is_training:
            ######## LOSS
            ### classification loss
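            # per-pixel cross-entropy, weighted by the penalty map pen_map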
            loss_bce = categorical_crossentropy(soft, one)
            loss_bce = tf.reduce_mean(loss_bce * pen_map, name='loss-bce')
            add_moving_summary(loss_bce)

            wd_loss = regularize_cost('.*/W', l2_regularizer(1.0e-5), name='l2_wd_loss')
            add_moving_summary(wd_loss)
            self.cost = loss_bce + wd_loss

            add_param_summary(('.*/W', ['histogram']))   # monitor W

            #### log visualizations
            orig_imgs = tf.cast(orig_imgs, tf.uint8)
            orig_imgs = crop_op(orig_imgs, (184, 184), "channels_last")
            tf.summary.image('input', orig_imgs, max_outputs=1)

            pred = colorize(prob_np[...,0], cmap='jet')
            true = colorize(true[...,0], cmap='jet')
            pen_map = colorize(pen_map, cmap='jet')

github armandmcqueen/tensorpack-mask-rcnn: MaskRCNN/model_fpn.py (view on GitHub)
"""
    sqrtarea = tf.sqrt(tf_area(boxes))
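    # FPN paper, Eq. (1): assign each RoI to level k = floor(k0 + log2(sqrt(area) / 224)), with k0 = 4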
    level = tf.cast(tf.floor(
        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)

    # RoI levels range from 2~5 (not 6)
    level_ids = [
        tf.where(level <= 2),
        tf.where(tf.equal(level, 3)),   # tensor == scalar is not supported; use tf.equal
        tf.where(tf.equal(level, 4)),
        tf.where(level >= 5)]
    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
                 for i, x in enumerate(level_ids)]
    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
                     for i, x in enumerate(level_ids)]
    add_moving_summary(*num_in_levels)

    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes

github tensorpack/tensorpack: examples/SimilarityLearning/mnist-embeddings.py (view on GitHub)
def build_graph(self, x, y, label):
        single_input = x
        x, y = self.embed([x, y])

        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            tf.identity(self.embed(single_input), name="emb")

        cost = siamese_cosine_loss(x, y, label, scope="loss")
        cost = tf.identity(cost, name="cost")
        add_moving_summary(cost)
        return cost

github tensorpack/tensorpack: examples/Char-RNN/char-rnn.py (view on GitHub)
        input_list = tf.unstack(input_feature, axis=1)  # seqlen x (Bxrnnsize)

        outputs, last_state = rnn.static_rnn(cell, input_list, initial, scope='rnnlm')
        last_state = tf.identity(last_state, 'last_state')

        # seqlen x (Bxrnnsize)
        output = tf.reshape(tf.concat(outputs, 1), [-1, param.rnn_size])  # (Bxseqlen) x rnnsize
        logits = FullyConnected('fc', output, param.vocab_size, activation=tf.identity)
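        # temperature-scaled softmax; the 'prob' tensor is sampled from at inference time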
        tf.nn.softmax(logits / param.softmax_temprature, name='prob')

        xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=tf.reshape(nextinput, [-1]))
        cost = tf.reduce_mean(xent_loss, name='cost')
        summary.add_param_summary(('.*/W', ['histogram']))   # monitor histogram of all W
        summary.add_moving_summary(cost)
        return cost

github armandmcqueen/tensorpack-mask-rcnn: MaskRCNN_no_batch/model_frcnn.py (view on GitHub)
        fg_inds = tf.reshape(tf.where(fg_mask), [-1])
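        # keep at most BATCH_PER_IM * FG_RATIO randomly chosen proposals as foreground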
        num_fg = tf.minimum(int(
            cfg.FRCNN.BATCH_PER_IM * cfg.FRCNN.FG_RATIO),
            tf.size(fg_inds), name='num_fg')
        fg_inds = tf.random_shuffle(fg_inds)[:num_fg]
        # fg_inds = fg_inds[:num_fg]

        bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1])
        num_bg = tf.minimum(
            cfg.FRCNN.BATCH_PER_IM - num_fg,
            tf.size(bg_inds), name='num_bg')
        bg_inds = tf.random_shuffle(bg_inds)[:num_bg]
        # bg_inds = bg_inds[:num_bg]

        add_moving_summary(num_fg, num_bg)
        return fg_inds, bg_inds

github vqdang/hover_net: src/model/micronet.py (view on GitHub)
            with tf.variable_scope("", reuse=True):
                aux_loss_dw = tf.get_variable('aux_loss_dw')

            loss_list = [] # index 0 is main output
            global_step = tf.train.get_or_create_global_step()
            global_step = tf.cast(global_step, tf.float32)
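            # the main output (idx 0) keeps full weight; auxiliary outputs are scaled by aux_loss_dw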
            for idx, sub_soft in enumerate(soft_list):
                loss_bce = categorical_crossentropy(sub_soft, one)
                loss_bce = tf.reduce_mean(loss_bce * pen_map)
                loss_bce = loss_bce if idx == 0 else loss_bce * aux_loss_dw
                loss_bce = tf.identity(loss_bce, name='loss-bce-%d' % idx)
                loss_list.append(loss_bce)
                add_moving_summary(loss_bce)

            wd_loss = regularize_cost('.*/W', l2_regularizer(1.0e-5), name='l2_wd_loss')
            add_moving_summary(wd_loss)

            cost = tf.add_n(loss_list) + wd_loss
            self.cost = tf.identity(cost, name='overall_cost')
            add_moving_summary(self.cost)
            ####

            add_param_summary(('.*/W', ['histogram']))   # monitor W

            #### log visualizations
            orig_imgs = tf.cast(orig_imgs, tf.uint8)
            tf.summary.image('input', orig_imgs, max_outputs=1)

            colored_list = [true] + prob_list + [tf.expand_dims(pen_map, axis=-1)]
            colored_list = [colorize(feat[...,0], cmap='jet') for feat in colored_list]

            viz = tf.concat([orig_imgs] + colored_list, 2)