How to use the tensorflow.reduce_mean function in TensorFlow

To help you get started, we’ve selected a few tensorflow.reduce_mean examples, based on popular ways it is used in public projects.

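tf.reduce_mean computes the mean of a tensor's elements, either over every element or along the axes passed as axis, and it most often appears as the final step that turns per-example losses or metrics into a single scalar. As a quick orientation before the project excerpts below, here is a minimal standalone sketch (assuming TensorFlow 2.x with eager execution; the values are illustrative):

import tensorflow as tf

x = tf.constant([[1.0, 2.0],
                 [3.0, 4.0]])

print(tf.reduce_mean(x))                         # 2.5, the mean of every element
print(tf.reduce_mean(x, axis=0))                 # [2.0, 3.0], mean down each column
print(tf.reduce_mean(x, axis=1, keepdims=True))  # [[1.5], [3.5]], reduced axis kept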

github peace195 / multitask-learning-protein-prediction / multitask-learning / multitask-3states / lstm.py
  structure_ss = tf.multiply(structure_ss, tf.expand_dims(tf_X_binary_mask, 2))

  structure_rel = tf.split(axis=0, num_or_size_splits=seq_max_len, value=structure_rel)
  # Change back dimension to [batch_size, n_step, n_input]
  structure_rel = tf.stack(structure_rel)
  structure_rel = tf.transpose(structure_rel, [1, 0, 2])
  structure_rel = tf.multiply(structure_rel, tf.expand_dims(tf_X_binary_mask, 2))

  structure_b = tf.split(axis=0, num_or_size_splits=seq_max_len, value=structure_b)
  # Change back dimension to [batch_size, n_step, n_input]
  structure_b = tf.stack(structure_b)
  structure_b = tf.transpose(structure_b, [1, 0, 2])
  structure_b = tf.multiply(structure_b, tf.expand_dims(tf_X_binary_mask, 2))

  cross_entropy_ss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=structure_ss, labels=y_labels))
  cross_entropy_rel = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=structure_rel, labels=rel_label))
  cross_entropy_b = tf.reduce_mean(tf.multiply(tf.nn.softmax_cross_entropy_with_logits(logits=structure_b, labels=b_label), tf_weight_mask))

  regularization = WEIGHT_DECAY * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not ("noreg" in tf_var.name or "Bias" in tf_var.name))

  prediction_ss = tf.argmax(tf.nn.softmax(structure_ss), 2)
  correct_prediction_ss = tf.reduce_sum(tf.multiply(tf.cast(tf.equal(prediction_ss, tf_y), tf.float32), tf_X_binary_mask))

  prediction_rel = tf.argmax(tf.nn.softmax(structure_rel), 2)
  correct_prediction_rel = tf.reduce_sum(tf.multiply(tf.cast(tf.equal(prediction_rel, tf_rel_label), tf.float32), tf_X_binary_mask))

  prediction_b = tf.argmax(tf.nn.softmax(structure_b), 2)
  correct_prediction_b = tf.reduce_sum(tf.multiply(tf.cast(tf.equal(prediction_b, tf_b_label), tf.float32), tf_X_binary_mask))

  optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy_ss + cross_entropy_rel + cross_entropy_b + regularization)
  saver = tf.train.Saver()
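
In this excerpt, tf.nn.softmax_cross_entropy_with_logits returns one loss value per example and time step, and tf.reduce_mean collapses that tensor to the scalar the Adam optimizer minimizes; a per-example weight such as tf_weight_mask can be multiplied in before taking the mean. A minimal sketch of just that reduction, with illustrative shapes and TensorFlow 2.x assumed:

import tensorflow as tf

logits = tf.random.normal([8, 100, 3])                      # [batch, steps, classes]
labels = tf.one_hot(tf.zeros([8, 100], tf.int32), depth=3)  # one-hot targets

per_step = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
loss = tf.reduce_mean(per_step)                             # scalar training loss

weights = tf.ones([8, 100])                                 # stand-in for tf_weight_mask
weighted_loss = tf.reduce_mean(per_step * weights)
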
github voxelmorph / voxelmorph / src / losses.py
def loss(self, _, y_pred):
        if self.penalty == 'l1':
            df = [tf.reduce_mean(tf.abs(f)) for f in self._diffs(y_pred)]
        else:
            assert self.penalty == 'l2', 'penalty can only be l1 or l2. Got: %s' % self.penalty
            df = [tf.reduce_mean(f * f) for f in self._diffs(y_pred)]
        return tf.add_n(df) / len(df)
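
Here tf.reduce_mean turns each difference tensor into a scalar L1 or L2 penalty, and the scalars are averaged with tf.add_n. A minimal sketch of the same reduction, with a made-up diffs list standing in for self._diffs(y_pred):

import tensorflow as tf

diffs = [tf.random.normal([4, 32, 32]), tf.random.normal([4, 32, 32])]

l1_terms = [tf.reduce_mean(tf.abs(d)) for d in diffs]   # 'l1' penalty
l2_terms = [tf.reduce_mean(d * d) for d in diffs]       # 'l2' penalty

l1_loss = tf.add_n(l1_terms) / len(l1_terms)
l2_loss = tf.add_n(l2_terms) / len(l2_terms)
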
github RedHenLab / multi-modal-emotion-prediction / LSTM.py
def summary_accuracy(predictions,labels,summary_name):
    """
    Compute average accuracy over the batch and write a summary.
    """
    accuracy = tf.nn.in_top_k(predictions, labels, k=1, name=None)
    accuracy = tf.to_float(accuracy)
    accuracy = tf.reduce_mean(accuracy)
    tf.summary.scalar(summary_name, accuracy)
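
Averaging a 0/1 correctness tensor with tf.reduce_mean is the standard way to get batch accuracy. The excerpt is TF1-style; here is a minimal sketch of the same idea in TensorFlow 2.x, where the removed tf.to_float is replaced by tf.cast (the values are illustrative):

import tensorflow as tf

predictions = tf.constant([[0.1, 0.9],
                           [0.8, 0.2],
                           [0.3, 0.7]])
labels = tf.constant([1, 0, 0])

correct = tf.math.in_top_k(targets=labels, predictions=predictions, k=1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))   # 2/3 for these values
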
github GianlucaPaolocci / Sound-classification-on-Raspberry-Pi-with-Tensorflow / urbanMLP.py
h_2 = tf.nn.sigmoid(tf.matmul(h_1,W_2) + b_2 )


W = tf.Variable(tf.random_normal([n_hidden_units_two,n_classes], mean = 0, stddev=sd))
b = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd))
y_ = tf.nn.softmax(tf.matmul(h_2,W) + b)

#init = tf.initialize_all_variables()
init = tf.global_variables_initializer()
saver = tf.train.Saver()#[X, Y, W_1, b_1, h_1, W_2, b_2, h_2, W, b, y_]

cost_function = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(y_), reduction_indices=[1]))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)

correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


cost_history = np.empty(shape=[1],dtype=float)
y_true, y_pred = None, None
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        _,cost = sess.run([optimizer,cost_function],feed_dict={X:train_x,Y:train_y})
        cost_history = np.append(cost_history,cost)
        print "Epoch: ", epoch, " cost ", cost

    y_pred = sess.run(tf.argmax(y_,1),feed_dict={X: test_x})
    y_true = sess.run(tf.argmax(test_y,1))

    #y_pred_t = sess.run(tf.argmax(y_, 1), feed_dict={X: features_t})
    #print ("truck:") ,y_pred_t
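
In the cost function above, reduction_indices is the deprecated name for axis, and the surrounding code is TF1/Python 2. A minimal sketch of the same mean cross-entropy with the current argument name (illustrative tensors, TensorFlow 2.x assumed):

import tensorflow as tf

Y_true = tf.constant([[0.0, 1.0], [1.0, 0.0]])   # one-hot labels
y_prob = tf.constant([[0.2, 0.8], [0.6, 0.4]])   # softmax outputs

cost = tf.reduce_mean(-tf.reduce_sum(Y_true * tf.math.log(y_prob), axis=1))

In practice, applying tf.nn.softmax_cross_entropy_with_logits to the pre-softmax logits is preferred over taking the log of a softmax output, since it is more numerically stable.
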
github cvlab-yonsei / projects / FlowGRU / code / main.py
    grad_stack = []
    nout_stack = []

    with tf.variable_scope(tf.get_variable_scope()):
        for g in range(num_gpu):
            with tf.device('/gpu:' + str(g)):

                #< ----------------------- Forward Pass ----------------------- >#

                net_out0, frn_out0, frn_out1, frn_out2, gru_out, p_gru_out = md.forward_pass\
                (normalize(appr_X[s:s+per_gpu[g]]), tmpr_X[s:s+per_gpu[g]], normalize(p_appr_X[s:s+per_gpu[g]]), p_tmpr_X[s:s+per_gpu[g]], h_prev[s:s+per_gpu[g]])

                #< ----------------------- Loss ----------------------- >#

                recon_loss   = md.sci_log(net_out0, gt[s:s+per_gpu[g]])
                recon_smooth = tf.reduce_mean(md.smoothness_2nd(net_out0, appr_X[s:s+per_gpu[g]]/255., 10))

                photo_loss0   = md.phot_loss(appr_X[s:s+per_gpu[g]]/255., p_appr_X[s:s+per_gpu[g]]/255., frn_out0)
                photo_smooth0 = tf.reduce_mean(md.smoothness_2nd(frn_out0, appr_X[s:s+per_gpu[g]]/255., 10))
                photo_loss1   = md.phot_loss(appr_X_rs2[s:s+per_gpu[g]]/255., p_appr_X_rs2[s:s+per_gpu[g]]/255., frn_out1)
                photo_smooth1 = tf.reduce_mean(md.smoothness_2nd(frn_out1, appr_X_rs2[s:s+per_gpu[g]]/255., 10))
                photo_loss2   = md.phot_loss(appr_X_rs4[s:s+per_gpu[g]]/255., p_appr_X_rs4[s:s+per_gpu[g]]/255., frn_out2)
                photo_smooth2 = tf.reduce_mean(md.smoothness_2nd(frn_out2, appr_X_rs4[s:s+per_gpu[g]]/255., 10))

                loss = (recon_loss + 0.1*recon_smooth) + 0.05*(photo_loss0 + 0.1*photo_smooth0 + photo_loss1 + 0.1*photo_smooth1 + photo_loss2 + 0.1*photo_smooth2)

                tf.get_variable_scope().reuse_variables()

                grad = optimizer.compute_gradients(loss)

                state_stack.append(p_gru_out)
                loss_stack.append(loss)
github liber145 / rlpack / rlpack / algos / continuous_a2c.py
        self.critic_optimizer = tf.train.AdamOptimizer(self.lr)

        self.action = tf.placeholder(tf.float32, [None, self._dim_act], "action")
        self.span_reward = tf.placeholder(tf.float32, [None], "span_reward")
        self.advantage = tf.placeholder(tf.float32, [None], "advantage")

        self.old_mu = tf.placeholder(tf.float32, (None, self._dim_act), "old_mu")
        self.old_log_var = tf.placeholder(tf.float32, [self._dim_act], "old_log_var")

        logp = -0.5 * tf.reduce_sum(self.log_var)
        logp += -0.5 * tf.reduce_sum(tf.square(self.action - self.mu) / tf.exp(self.log_var), axis=1, keepdims=True)

        logp_old = -0.5 * tf.reduce_sum(self.old_log_var)
        logp_old += -0.5 * tf.reduce_sum(tf.square(self.action - self.old_mu) / tf.exp(self.old_log_var), axis=1, keepdims=True)

        self.actor_loss = -tf.reduce_mean(self.advantage * tf.exp(logp - logp_old))

        # Update by adam.
        self.train_actor_op = self.actor_optimizer.minimize(self.actor_loss)

        # ---------- Build critic algorithm. ----------
        self.critic_loss = tf.reduce_mean(tf.square(self.state_value - self.span_reward))

        # Update by adam.
        self.train_critic_op = self.critic_optimizer.minimize(self.critic_loss, global_step=tf.train.get_global_step())

        # ---------- Build action. ----------
        self.sampled_act = (self.mu + tf.exp(self.log_var / 2.0) * tf.random_normal(shape=[self._dim_act], dtype=tf.float32))
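
Two reductions appear above: tf.reduce_mean of the ratio-weighted advantage gives the actor's surrogate loss, and tf.reduce_mean of a squared error gives the critic loss. A minimal sketch with random stand-ins for the graph tensors (TensorFlow 2.x assumed):

import tensorflow as tf

advantage   = tf.random.normal([64])
logp        = tf.random.normal([64])
logp_old    = tf.random.normal([64])
state_value = tf.random.normal([64])
span_reward = tf.random.normal([64])

actor_loss  = -tf.reduce_mean(advantage * tf.exp(logp - logp_old))
critic_loss = tf.reduce_mean(tf.square(state_value - span_reward))
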
github prannayk / videoMultiGAN / acrcn / 53_no_reconstruction.py
def gan_loss(self, X, Y, discriminator, X_in, Y_in, reuse=False, name=None, scope=tf.variable_scope("random_testing"), flag=True):
		if not flag : 
			return tf.reduce_mean(X)
		loss = tf.reduce_mean(Y) - tf.reduce_mean(X)
		epsilon = tf.random_normal([],0.0,1.0)
		mix = (X_in * epsilon) + ((1-epsilon) * Y_in)
		scope.reuse_variables()
		d_hat = discriminator(mix, scope=scope)
		grads = tf.gradients(d_hat, mix)
		ddx_sum = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=1))
		ddx_loss = tf.reduce_mean(tf.square(ddx_sum - 1.0) * self.wgan_scale)
		return loss + ddx_loss
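
The tf.reduce_mean here averages a WGAN-style gradient penalty over the batch: each sample's gradient norm is pushed toward 1. A minimal sketch of just that term, with grads standing in for tf.gradients(d_hat, mix)[0] and the wgan_scale factor omitted:

import tensorflow as tf

grads = tf.random.normal([16, 128])                          # [batch, features]
grad_norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=1))
gradient_penalty = tf.reduce_mean(tf.square(grad_norm - 1.0))
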
github lok419 / Reinforcement_learning_on_portfolio_selection / ActorNetwork.py
def reward_2(self):
        # Average Uniform Constant Rebalanced reward
        print("Reward function 2 (Average Uniform Constant Rebalanced reward)")

        # transaction cost
        self.transaction_cost = 1 - tf.reduce_sum(self.transaction_factor * tf.abs(self.model.output[:,:-1] - self.last_action), axis=1)

        return -tf.reduce_mean(tf.log(self.transaction_cost * tf.reduce_sum(self.model.output * self.future_price, axis=1) /
                                      tf.reduce_sum(self.future_price[:,:-1] / self.state_size[0], axis=1)))
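
The quantity inside tf.reduce_mean here is the log of a transaction-cost-adjusted return ratio, so minimizing the negative mean maximizes the average log-return. A minimal sketch of that final reduction with made-up per-period returns:

import tensorflow as tf

gross_return = tf.constant([1.02, 0.99, 1.05])        # per-period portfolio returns
reward = -tf.reduce_mean(tf.math.log(gross_return))   # minimize this to maximize log-growth
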
github tensorlayer / tensorlayer-chinese / tensorlayer / layers / core.py
        # multiply by the embedding matrix.
        # embed is the outputs of the hidden layer (embedding layer), it is a
        # row vector with 'embedding_size' values.
        with tf.variable_scope(name):
            embeddings = tf.get_variable(
                name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
            embed = tf.nn.embedding_lookup(embeddings, self.inputs)
            # Construct the variables for the NCE loss (i.e. negative sampling)
            nce_weights = tf.get_variable(
                name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args)
            nce_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args)

        # Compute the average NCE loss for the batch.
        # tf.nce_loss automatically draws a new sample of the negative labels
        # each time we evaluate the loss.
        self.nce_cost = tf.reduce_mean(
            tf.nn.nce_loss(
                weights=nce_weights,
                biases=nce_biases,
                inputs=embed,
                labels=train_labels,
                num_sampled=num_sampled,
                num_classes=vocabulary_size,
                **nce_loss_args))

        self.outputs = embed
        self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1)

        self.all_layers = [self.outputs]
        self.all_params = [embeddings, nce_weights, nce_biases]
        self.all_drop = {}
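
tf.nn.nce_loss returns one loss value per training example, so tf.reduce_mean gives the batch-average NCE cost. A minimal sketch with illustrative sizes (TensorFlow 2.x assumed; labels must be int64 with shape [batch, 1]):

import tensorflow as tf

vocabulary_size, embedding_size, num_sampled = 1000, 64, 16

nce_weights = tf.Variable(tf.random.normal([vocabulary_size, embedding_size]))
nce_biases  = tf.Variable(tf.zeros([vocabulary_size]))

embed = tf.random.normal([32, embedding_size])   # hidden-layer (embedding) outputs
train_labels = tf.random.uniform([32, 1], maxval=vocabulary_size, dtype=tf.int64)

nce_cost = tf.reduce_mean(
    tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
                   labels=train_labels, inputs=embed,
                   num_sampled=num_sampled, num_classes=vocabulary_size))
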
github YunYang1994 / TensorFlow2.0-Examples / 6-Generative_Adversarial_Networks / Pix2Pix.py
def generator_loss(disc_generated_output, gen_output, target):
    LAMBDA = 100
    gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)
    # mean absolute error
    l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
    total_gen_loss = gan_loss + (LAMBDA * l1_loss)

    return total_gen_loss