How to use the tensorflow.summary.scalar function in tensorflow

To help you get started, we’ve selected a few tensorflow.summary.scalar examples, based on popular ways the function is used in public projects.
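
Before diving into the project snippets, here is a minimal sketch of the basic pattern in TensorFlow 1.x graph mode; the tensor, log directory, and step count below are placeholders chosen for illustration, not taken from any of the projects shown.

import tensorflow as tf

# Any 0-d numeric tensor can be tracked; a toy "loss" stands in here.
loss = tf.reduce_mean(tf.square(tf.random_normal([32])))

tf.summary.scalar('loss', loss)   # register a scalar summary op
merged = tf.summary.merge_all()   # merge every registered summary op

with tf.Session() as sess:
    writer = tf.summary.FileWriter('./logs', sess.graph)
    for step in range(10):
        summary, _ = sess.run([merged, loss])
        writer.add_summary(summary, step)
    writer.close()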


github veronicachelu / temporal_abstraction / networks / network_attention_feudal.py View on Github
    local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)

    """Gradients and update ops"""
    self.grads_sf, self.apply_grads_sf = self.take_gradient(self.sf_loss, "sr")
    self.grads_option, self.apply_grads_option = self.take_gradient(self.option_loss, "worker")
    self.grads_critic, self.apply_grads_critic = self.take_gradient(self.critic_loss, "manager")
    self.grads_goal, self.apply_grads_goal = self.take_gradient(self.goal_loss, "manager")

    """Summaries"""
    self.merged_summary_sf = tf.summary.merge(self.image_summaries +
      self.summaries_sf + [tf.summary.scalar('SF_loss', self.sf_loss),
        gradient_summaries(zip(self.grads_sf, local_vars))])

    self.merged_summary_option = tf.summary.merge(self.summaries_option +\
                       [tf.summary.scalar('Entropy_loss', self.entropy_loss),
                        tf.summary.scalar('Policy_loss', self.policy_loss),
                        tf.summary.scalar('Mix_critic_loss', self.mix_critic_loss),
                        gradient_summaries(zip(self.grads_option, local_vars))])
    self.merged_summary_critic = tf.summary.merge(self.summaries_critic +\
                                                  [tf.summary.scalar('Critic_loss', self.critic_loss),
                                                   gradient_summaries(zip(self.grads_critic, local_vars))])
    self.merged_summary_goal = tf.summary.merge(self.image_summaries_goal +
                                                [tf.summary.scalar('goal_loss', self.goal_loss),
                                                 gradient_summaries(zip(self.grads_goal, local_vars))])
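
The merges above also pull in the output of a project-specific gradient_summaries helper, which is not shown in this excerpt. A hypothetical sketch of such a helper (the name is reused for illustration; the exact summaries it emits are assumptions, not taken from the repository) could look like:

import tensorflow as tf

def gradient_summaries(grad_vars, scope='gradients'):
    # Collect per-variable gradient summaries and merge them into one summary op.
    summaries = []
    for grad, var in grad_vars:
        if grad is None:
            continue
        name = var.name.replace(':', '_')
        summaries.append(tf.summary.histogram(scope + '/' + name, grad))
        summaries.append(tf.summary.scalar(scope + '/' + name + '_norm',
                                           tf.global_norm([grad])))
    return tf.summary.merge(summaries)
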
github machinelearningmindset / TensorFlow-Course / codes / ipython / 3-neural_networks / convolutional-neural-network / code / train_classifier.py View on Github
    tf.summary.image('images', data.train.images[arr], max_outputs=3,
                     collections=['per_epoch_train'])

    # Histogram and scalar summaries
    # sparsity: This summary is the fraction of zero activations for the output of each layer!
    # activations: This summary is the histogram of activations for the output of each layer!
    # WARNING: tf.summary.histogram can be very time consuming so it will be calculated per epoch!
    for end_point in end_points:
        x = end_points[end_point]
        tf.summary.scalar('sparsity/' + end_point,
                          tf.nn.zero_fraction(x), collections=['train', 'test'])
        tf.summary.histogram('activations/' + end_point, x, collections=['per_epoch_train'])

    # Summaries for loss and accuracy
    tf.summary.scalar("loss", loss, collections=['train', 'test'])
    tf.summary.scalar("accuracy", accuracy, collections=['train', 'test'])
    tf.summary.scalar("global_step", global_step, collections=['train'])
    tf.summary.scalar("learning_rate", learning_rate, collections=['train'])

    # Merge all summaries together.
    summary_train_op = tf.summary.merge_all('train')
    summary_test_op = tf.summary.merge_all('test')
    summary_epoch_train_op = tf.summary.merge_all('per_epoch_train')

    ########################################################
    ############ # Defining the tensors list ###############
    ########################################################

    tensors_key = ['cost', 'accuracy', 'train_op', 'global_step', 'image_place', 'label_place', 'dropout_param',
                   'summary_train_op', 'summary_test_op', 'summary_epoch_train_op']
    tensors = [loss, accuracy, train_op, global_step, image_place, label_place, dropout_param, summary_train_op,
               summary_test_op, summary_epoch_train_op]
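
Each merged op above only covers the summaries registered in its collection, so a training loop would evaluate and write them separately. The sketch below is hypothetical; the session, writer, feed dict, step counter, and steps_per_epoch are assumed names, not part of the excerpt.

train_writer = tf.summary.FileWriter(train_log_dir, sess.graph)

# Cheap 'train' summaries (loss, accuracy, global_step, learning_rate) every step.
summary = sess.run(summary_train_op, feed_dict=train_feed_dict)
train_writer.add_summary(summary, step)

# Heavier 'per_epoch_train' summaries (images, activation histograms) once per epoch.
if step % steps_per_epoch == 0:
    summary = sess.run(summary_epoch_train_op, feed_dict=train_feed_dict)
    train_writer.add_summary(summary, step)
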
github kYroL01 / ConvNet / my_alexnet_cnn.py View on Github
with tf.Session() as sess:

            ## Construct model: prepare logits, loss and optimizer ##

            # logits: unnormalized log probabilities
            logits = self.alex_net_model(self.img_pl, self.weights, self.biases, self.keep_prob_in, self.keep_prob_hid)

            # loss: cross-entropy between the target and the softmax of the model's prediction
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.label_pl))

            tf.summary.scalar("cross-entropy_for_loss", loss)

            # optimizer: compute gradients of the loss with respect to each of the variables and apply updates
            train_step = tf.train.AdamOptimizer(learning_rate=self.learning_rate, epsilon=0.1).minimize(loss)

            tf.summary.scalar("learning_rate", self.learning_rate)
            
            print(logits.get_shape(), self.label_pl.get_shape())

            ## Evaluate model: the degree to which the result of the prediction conforms to the correct value ##
            
            # list of booleans
            correct_pred = tf.equal(tf.argmax(logits,1), tf.argmax(self.label_pl, 1))
            # [True, False, True, True] -> [1,0,1,1] -> 0.75
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

            tf.summary.scalar("accuracy", accuracy)

            merged_summary_op = tf.summary.merge_all()

            # Initializing the variables
            init = tf.global_variables_initializer()
github kwotsin / transfer_learning_tutorial / train_flowers.py View on Github
        optimizer = tf.train.AdamOptimizer(learning_rate=lr)

        #Create the train_op.
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        #State the metrics that you want to track. The predictions we get are not one-hot encoded.
        predictions = tf.argmax(end_points['Predictions'], 1)
        probabilities = end_points['Predictions']
        accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(predictions, labels)
        metrics_op = tf.group(accuracy_update, probabilities)


        #Now finally create all the summaries you need to monitor and group them into one summary op.
        tf.summary.scalar('losses/Total_Loss', total_loss)
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('learning_rate', lr)
        my_summary_op = tf.summary.merge_all()

        #Now we need to create a training step function that runs the train_op and metrics_op and updates the global_step.
        def train_step(sess, train_op, global_step):
            '''
            Runs a session on the provided ops and logs the time elapsed for each global step.
            '''
            #Check the time for each sess run
            start_time = time.time()
            total_loss, global_step_count, _ = sess.run([train_op, global_step, metrics_op])
            time_elapsed = time.time() - start_time

            #Run the logging to print some results
            logging.info('global step %s: loss: %.4f (%.2f sec/step)', global_step_count, total_loss, time_elapsed)

            return total_loss, global_step_count
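
The merged my_summary_op is created above but never evaluated in this excerpt. A hypothetical way to write it periodically from a plain training loop (the writer, log_dir, num_steps, and sess names are assumptions) would be:

summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

for step in range(num_steps):
    loss_value, step_count = train_step(sess, train_op, global_step)
    if step % 10 == 0:
        # Evaluate all merged summaries and write them for TensorBoard.
        summaries = sess.run(my_summary_op)
        summary_writer.add_summary(summaries, step_count)
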
github google-research / google-research / bisimulation_aaai2020 / grid_world / grid_world.py View on Github
    # Now reshape to match the shapes of the estimate and target.
    loss_mask = tf.reshape(loss_mask, (self.batch_size**2, 1))
    larger_targets = bisimulation_target - bisimulation_estimate
    larger_targets_count = tf.reduce_sum(
        tf.cast(larger_targets > 0., tf.float64))
    tf.summary.scalar('Learning/LargerTargets', larger_targets_count)
    tf.summary.scalar('Learning/NumUpdates', tf.count_nonzero(loss_mask))
    tf.summary.scalar('Learning/BisimHorizon', self.bisim_horizon_ph)
    bisimulation_loss = tf.losses.mean_squared_error(
        bisimulation_target,
        bisimulation_estimate,
        weights=loss_mask)
    tf.summary.scalar('Learning/loss', bisimulation_loss)
    # Plot average distance between sampled representations.
    average_distance = tf.reduce_mean(bisimulation_estimate)
    tf.summary.scalar('Approx/AverageDistance', average_distance)
    return optimizer.minimize(bisimulation_loss)
github garlicdevs / Fruit-API / fruit / draft / atari.py View on Github
def create_train_step_2(self):
        learning_rate = self.layer_manager.create_input(tf.float32, shape=None, name="tf_learning_rate_2")
        optimizer = self.optimizer(learning_rate=learning_rate)

        with tf.name_scope('compute-clip-grads-2'):
            gradients = optimizer.compute_gradients(self.tf_total_loss_2)
            tensors = [tensor for gradient, tensor in gradients]
            grads = [gradient for gradient, tensor in gradients]
            clipped_gradients, _ = tf.clip_by_global_norm(grads, self.global_norm_clipping)
            clipped_grads_tensors = zip(clipped_gradients, tensors)
            train_step = optimizer.apply_gradients(clipped_grads_tensors)
        tf.summary.scalar('learning-rate-2', learning_rate)
        return train_step, learning_rate
github wnhsu / FactorizedHierarchicalVAE / src / runners / fhvae_runner.py View on Github
    test_iterator_fn    = lambda: test_set.iterator(2048, set_name)
    test_label_to_N     = test_set.get_label_N(set_name)
    latent1_var         = np.power(model.model_conf["latent1_std"], 2)

    model_dir = "%s/models" % exp_dir
    ckpt_path = os.path.join(model_dir, "fhvae.ckpt")

    # create summaries
    sum_names = ["lb", "logpx_z", "log_pmu1", "neg_kld_z1", "neg_kld_z2"]
    sum_vars = [tf.reduce_mean(model.outputs[name]) for name in sum_names]
    
    with tf.variable_scope("test"):
        test_vars = OrderedDict([(name, tf.get_variable(name, initializer=0.)) \
                for name in sum_names])
        test_summaries = tf.summary.merge(
                [tf.summary.scalar(k, test_vars[k]) for k in test_vars])

    with tf.Session(config=SESS_CONF) as sess:
        start_time = time.time()
        model.saver.restore(sess, ckpt_path)
        info("restore model takes %.2f s" % (time.time() - start_time))
        test_writer = tf.summary.FileWriter("%s/log/test" % exp_dir)

        test_vals = _valid(
                sess, model, sum_names, sum_vars, test_label_to_N, 
                latent1_var, test_iterator_fn, debug=False)
        feed_dict = dict(zip(test_vars.values(), test_vals.values()))
        summary, global_step = sess.run([test_summaries, model.global_step], feed_dict)
        test_writer.add_summary(summary, global_step)
        info("test\t" + ", ".join(["%s %.4f" % p for p in test_vals.items()]))
github matanatz / pcnn / part_segmentation / train.py View on Github
            seg_training_loss_ph = tf.placeholder(tf.float32, shape=())
            seg_testing_loss_ph = tf.placeholder(tf.float32, shape=())

            seg_training_acc_ph = tf.placeholder(tf.float32, shape=())
            seg_testing_acc_ph = tf.placeholder(tf.float32, shape=())
            seg_testing_acc_avg_cat_ph = tf.placeholder(tf.float32, shape=())

            total_train_loss_sum_op = tf.summary.scalar('total_training_loss', total_training_loss_ph)
            total_test_loss_sum_op = tf.summary.scalar('total_testing_loss', total_testing_loss_ph)

            seg_train_loss_sum_op = tf.summary.scalar('seg_training_loss', seg_training_loss_ph)
            seg_test_loss_sum_op = tf.summary.scalar('seg_testing_loss', seg_testing_loss_ph)

            seg_train_acc_sum_op = tf.summary.scalar('seg_training_acc', seg_training_acc_ph)
            seg_test_acc_sum_op = tf.summary.scalar('seg_testing_acc', seg_testing_acc_ph)
            seg_test_acc_avg_cat_op = tf.summary.scalar('seg_testing_acc_avg_cat', seg_testing_acc_avg_cat_ph)

            train_variables = tf.trainable_variables()

            trainer = tf.train.AdamOptimizer(learning_rate)
            train_op = trainer.minimize(loss, var_list=train_variables, global_step=batch)

        saver = tf.train.Saver()

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        sess = tf.Session(config=config)
        
        init = tf.global_variables_initializer()
        sess.run(init)
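
Because these scalar summaries read from placeholders rather than from tensors inside the training graph, they are evaluated with metric values computed on the Python side. The sketch below is hypothetical; the writer, log directory, epoch counter, and metric value are assumed names.

        train_writer = tf.summary.FileWriter(train_log_dir, sess.graph)

        # Feed an epoch-level metric computed in Python into its placeholder-backed summary.
        summary = sess.run(seg_train_loss_sum_op,
                           feed_dict={seg_training_loss_ph: epoch_training_loss})
        train_writer.add_summary(summary, epoch)
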
github dEcmir / lego_yolo / loss.py View on Github
    cooid = slim.flatten(cooid)
    true = tf.concat([probs, confs, coord], 1)
    wght = tf.concat([proid, conid, cooid], 1)

    print('Building {} loss'.format(m['model']))
    loss = tf.pow(net_out - true, 2)
    loss = tf.multiply(loss, wght)
    loss = tf.reduce_sum(loss, 1)
    loss = .5 * tf.reduce_mean(loss)

    # make the loss retrievable
    # tf.add_to_collection("loss", loss)

    # adding a summary for Tensorboard
    tf.summary.scalar('training_loss', loss)
    tf.summary.scalar('confs_mean', tf.reduce_mean(confs))
    return placeholders, loss
github bfs18 / nsynth_wavenet / wavenet / parallel_wavenet.py View on Github
            log_scale = iaf_dict['log_scale']
            mean_tot = iaf_dict['mean'] + mean_tot * scale
            scale_tot *= scale
            log_scale_tot += log_scale

        mean_tot = tf.squeeze(mean_tot, axis=2)
        scale_tot = tf.squeeze(tf.minimum(scale_tot, tf.exp(7.0)), axis=2)
        log_scale_tot = tf.squeeze(tf.minimum(log_scale_tot, 7.0), axis=2)
        # new_x = tf.squeeze(iaf_x, axis=2)
        new_x = x * scale_tot + mean_tot

        if DETAIL_LOG and not init:
            tf.summary.scalar('new_x', tf.reduce_mean(new_x))
            tf.summary.scalar('new_x_std', utils.reduce_std(new_x))
            tf.summary.scalar('new_x_abs', tf.reduce_mean(tf.abs(new_x)))
            tf.summary.scalar('new_x_abs_std', utils.reduce_std(tf.abs(new_x)))
            tf.summary.scalar('mean_tot', tf.reduce_mean(mean_tot))
            tf.summary.scalar('scale_tot', tf.reduce_mean(scale_tot))
            tf.summary.scalar('log_scale_tot', tf.reduce_mean(log_scale_tot))

        return {'x': new_x,
                'mean_tot': mean_tot,
                'scale_tot': scale_tot,
                'log_scale_tot': log_scale_tot,
                'rand_input': x}
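
The standard-deviation summaries above go through a project-specific utils.reduce_std, which is not a TensorFlow 1.x built-in. A hypothetical equivalent (the name is reused purely for illustration) could be:

import tensorflow as tf

def reduce_std(x):
    # Standard deviation over all elements: sqrt(mean((x - mean(x)) ** 2)).
    mean = tf.reduce_mean(x)
    return tf.sqrt(tf.reduce_mean(tf.square(x - mean)))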