How to use the tensorflow.reduce_sum function in TensorFlow

To help you get started, we’ve selected a few tensorflow.reduce_sum examples, based on popular ways the function is used in public projects.

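tf.reduce_sum computes the sum of tensor elements across the given axes (all axes by default). Before the project snippets, here is a minimal sketch of the common call patterns; the values are invented purely for illustration:

import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])

total = tf.reduce_sum(x)                                 # sum of all elements -> 21.0
per_column = tf.reduce_sum(x, axis=0)                    # -> [5., 7., 9.]
per_row = tf.reduce_sum(x, axis=1)                       # -> [6., 15.]
per_row_kept = tf.reduce_sum(x, axis=1, keepdims=True)   # shape (2, 1)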

github ClementWalter / Keras-FewShotLearning / notebooks / triplet_loss_cifar10.py
            # 1 - cosine similarity: L2-normalize both embedding batches,
            # multiply elementwise, and sum over the feature axis
            lambda x: 1 - tf.reduce_sum(tf.nn.l2_normalize(x[0], axis=1) * tf.nn.l2_normalize(x[1], axis=1), axis=1)
        ),
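The fragment above computes a cosine distance: each pair of vectors is L2-normalized, multiplied elementwise, and summed over the feature axis. A standalone sketch of the same idea, with tensor names and shapes made up for illustration:

import tensorflow as tf

a = tf.random.normal([8, 64])   # batch of 8 embeddings
b = tf.random.normal([8, 64])

a_n = tf.nn.l2_normalize(a, axis=1)
b_n = tf.nn.l2_normalize(b, axis=1)

# Dot product of unit vectors = cosine similarity; reduce_sum performs the dot product.
cosine_similarity = tf.reduce_sum(a_n * b_n, axis=1)   # shape (8,)
cosine_distance = 1 - cosine_similarity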
github Stick-To / YOLO-tensorflow / YOLOv3.py
                gn1_hwi = tf.boolean_mask(gn1_hwi, rga1mask)
                gn2_yxi = tf.boolean_mask(gn2_yxi, rga2mask)
                gn2_hwi = tf.boolean_mask(gn2_hwi, rga2mask)
                gn3_yxi = tf.boolean_mask(gn3_yxi, rga3mask)
                gn3_hwi = tf.boolean_mask(gn3_hwi, rga3mask)
                gn1_labeli = tf.one_hot(tf.boolean_mask(gn1_labeli, rga1mask), self.num_classes)
                gn2_labeli = tf.one_hot(tf.boolean_mask(gn2_labeli, rga2mask), self.num_classes)
                gn3_labeli = tf.one_hot(tf.boolean_mask(gn3_labeli, rga3mask), self.num_classes)
                rp1bbox_yx_target = gn1_yxi - tf.floor(gn1_yxi)
                rp2bbox_yx_target = gn2_yxi - tf.floor(gn2_yxi)
                rp3bbox_yx_target = gn3_yxi - tf.floor(gn3_yxi)
                rp1bbox_hw_target = tf.log(gn1_hwi/ra1bbox_hw)
                rp2bbox_hw_target = tf.log(gn2_hwi/ra2bbox_hw)
                rp3bbox_hw_target = tf.log(gn3_hwi/ra3bbox_hw)
                yx_loss1 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=rp1bbox_yx_target, logits=rp1bbox_yx))
                yx_loss2 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=rp2bbox_yx_target, logits=rp2bbox_yx))
                yx_loss3 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=rp3bbox_yx_target, logits=rp3bbox_yx))
                hw_loss1 = 0.5 * tf.reduce_sum(tf.square(rp1bbox_hw - rp1bbox_hw_target))
                hw_loss2 = 0.5 * tf.reduce_sum(tf.square(rp2bbox_hw - rp2bbox_hw_target))
                hw_loss3 = 0.5 * tf.reduce_sum(tf.square(rp3bbox_hw - rp3bbox_hw_target))
                coord_loss = (yx_loss1 + yx_loss2 + yx_loss3 + hw_loss1 + hw_loss2 + hw_loss3)
                class_loss1 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=gn1_labeli, logits=rp1class))
                class_loss2 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=gn2_labeli, logits=rp2class))
                class_loss3 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=gn3_labeli, logits=rp3class))
                class_loss = (class_loss1 + class_loss2 + class_loss3)
                obj_loss1 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(rp1obj), logits=rp1obj))
                obj_loss2 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(rp2obj), logits=rp2obj))
                obj_loss3 = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(rp3obj), logits=rp3obj))
                obj_loss = (obj_loss1 + obj_loss2 + obj_loss3)
                nogn1_mask = tf.reshape(nogn1_mask, [-1])
                nogn2_mask = tf.reshape(nogn2_mask, [-1])
                nogn3_mask = tf.reshape(nogn3_mask, [-1])
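In the YOLOv3 loss above, tf.reduce_sum collapses per-element sigmoid cross-entropy and squared-error terms into scalar loss components that are then added together. A reduced sketch of that pattern, with invented shapes and values:

import tensorflow as tf

targets = tf.constant([[1., 0.], [0., 1.]])
logits = tf.constant([[2.0, -1.0], [-0.5, 1.5]])

# Per-element losses, then a scalar via reduce_sum.
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)
class_loss = tf.reduce_sum(ce)

pred_hw = tf.constant([[0.2, 0.1], [0.4, 0.3]])
target_hw = tf.constant([[0.25, 0.1], [0.35, 0.3]])
hw_loss = 0.5 * tf.reduce_sum(tf.square(pred_hw - target_hw))

total_loss = class_loss + hw_loss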
github stevezheng23 / bert_extension_tf / run_nlu.py
    if token_label_ids is not None:
        with tf.variable_scope("token_loss", reuse=tf.AUTO_REUSE):
            token_label = tf.cast(token_label_ids, dtype=tf.float32)
            token_label_mask = tf.cast(input_masks, dtype=tf.float32)
            masked_token_label = tf.cast(token_label * token_label_mask, dtype=tf.int32)
            token_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=masked_token_label, logits=masked_token_predict)
            token_loss = tf.reduce_sum(token_cross_entropy * token_label_mask) / tf.reduce_sum(tf.reduce_max(token_label_mask, axis=-1))
            loss = loss + token_loss
    
    if sent_label_ids is not None:
        with tf.variable_scope("sent_loss", reuse=tf.AUTO_REUSE):
            sent_label = tf.cast(sent_label_ids, dtype=tf.float32)
            sent_label_mask = tf.cast(tf.reduce_max(input_masks, axis=-1), dtype=tf.float32)
            masked_sent_label = tf.cast(sent_label * sent_label_mask, dtype=tf.int32)
            sent_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=masked_sent_label, logits=masked_sent_predict)
            sent_loss = tf.reduce_sum(sent_cross_entropy * sent_label_mask) / tf.reduce_sum(tf.reduce_max(sent_label_mask, axis=-1))
            loss = loss + sent_loss
    
    return loss, token_predict_ids, sent_predict_ids
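Here tf.reduce_sum appears twice in the same expression: once to sum the masked per-token losses and once to build the denominator that normalizes by the number of active sequences. A simplified sketch of that masked-average pattern, with illustrative tensor names:

import tensorflow as tf

per_token_loss = tf.constant([[0.5, 1.0, 0.0], [2.0, 0.0, 0.0]])
mask = tf.constant([[1., 1., 0.], [1., 0., 0.]])   # 1 = real token, 0 = padding

masked_sum = tf.reduce_sum(per_token_loss * mask)            # sum over real tokens only
num_sequences = tf.reduce_sum(tf.reduce_max(mask, axis=-1))  # sequences with >= 1 real token
loss = masked_sum / num_sequences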
github deephyper / deephyper / deephyper / search / nas / utils / common / distributions.py
    def entropy(self):
        a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
        ea0 = tf.exp(a0)
        z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
        p0 = ea0 / z0
        return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1)
    def sample(self):
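The entropy method above uses a numerically stable log-sum-exp: reduce_max shifts the logits, reduce_sum builds the normalizer, and a final reduce_sum over the last axis yields the per-sample entropy. A self-contained sketch of the same computation, using tf.math.log (the TF 2.x name for tf.log):

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1], [0.0, 0.0, 0.0]])

a0 = logits - tf.reduce_max(logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
entropy = tf.reduce_sum(p0 * (tf.math.log(z0) - a0), axis=-1)  # shape (2,)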
github chris-chris / pysc2-examples / a2c / kfac.py
            if fpropFactor is not None:
              if self._approxT2:
                if KFAC_DEBUG:
                  print(('approxT2 grad fisher for %s' % (var.name)))
                bpropFactor = tf.reduce_sum(
                  bpropFactor, [1, 2])  # T^2 terms * 1/T^2
              else:
                bpropFactor = tf.reshape(
                  bpropFactor, [-1, C]) * Oh * Ow  # T * 1/T terms
            else:
              # just doing block diag approx. spatial independent
              # structure does not apply here. summing over
              # spatial locations
              if KFAC_DEBUG:
                print(('block diag approx fisher for %s' % (var.name)))
              bpropFactor = tf.reduce_sum(bpropFactor, [1, 2])

          # assume sampled loss is averaged. TO-DO:figure out better
          # way to handle this
          bpropFactor *= tf.to_float(B)
          ##

          cov_b = tf.matmul(
            bpropFactor, bpropFactor, transpose_a=True) / tf.to_float(tf.shape(bpropFactor)[0])

          updateOps.append(cov_b)
          statsUpdates[stats_var] = cov_b
          statsUpdates_cache[stats_var] = cov_b

    if KFAC_DEBUG:
      aKey = list(statsUpdates.keys())[0]
      statsUpdates[aKey] = tf.Print(statsUpdates[aKey],
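In this K-FAC snippet, tf.reduce_sum collapses the spatial dimensions of the backprop factor (axes 1 and 2 of an NHWC-like tensor) before the covariance is estimated. A minimal sketch of summing over spatial locations; the tensor and its shape are invented:

import tensorflow as tf

# e.g. gradients w.r.t. a conv activation: [batch, height, width, channels]
bprop_factor = tf.random.normal([4, 7, 7, 32])

# Sum over the spatial axes, keeping batch and channel dimensions.
spatially_summed = tf.reduce_sum(bprop_factor, [1, 2])   # shape (4, 32)

cov = tf.matmul(spatially_summed, spatially_summed, transpose_a=True) / tf.cast(
    tf.shape(spatially_summed)[0], tf.float32)            # (32, 32) covariance estimate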
github michaelisc / cluttered-omniglot / model.py
    # returns the pixel corresponding to the center of mass of the segmentation mask
    # if no pixel is segmented [-1,-1] is returned
    image = tf.cast(image, tf.float32)
    
    sz = image.get_shape().as_list()
    batch_size = sz[0]
    szx = sz[1]
    szy = sz[2]
    
    e = 0.00001
    
    x,y = tf.meshgrid(list(range(0,szx)),list(range(0,szy)))
    x = tf.cast(tf.tile(tf.expand_dims(tf.expand_dims(x, axis=-1), axis=0), [batch_size, 1, 1, 1]), tf.float32)
    y = tf.cast(tf.tile(tf.expand_dims(tf.expand_dims(y, axis=-1), axis=0), [batch_size, 1, 1, 1]), tf.float32)
    comx = (tf.reduce_sum(x * image, axis=[1,2,3])+e)//(tf.reduce_sum(image, axis=[1,2,3])-e)
    comy = (tf.reduce_sum(y * image, axis=[1,2,3])+e)//(tf.reduce_sum(image, axis=[1,2,3])-e)
    
    return comx, comy
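The snippet uses tf.reduce_sum twice per axis: the weighted coordinate sum in the numerator and the total mask mass in the denominator. A compact sketch of a center-of-mass computation for a single-channel mask; the names, the random mask, and the use of true division instead of // are illustrative assumptions:

import tensorflow as tf

mask = tf.random.uniform([2, 28, 28, 1])  # fake segmentation masks in [0, 1]
batch, h, w = 2, 28, 28

x, y = tf.meshgrid(tf.range(w, dtype=tf.float32), tf.range(h, dtype=tf.float32))
x = tf.tile(x[tf.newaxis, :, :, tf.newaxis], [batch, 1, 1, 1])
y = tf.tile(y[tf.newaxis, :, :, tf.newaxis], [batch, 1, 1, 1])

mass = tf.reduce_sum(mask, axis=[1, 2, 3])                       # total mask weight per image
com_x = tf.reduce_sum(x * mask, axis=[1, 2, 3]) / (mass + 1e-5)  # per-image x coordinate
com_y = tf.reduce_sum(y * mask, axis=[1, 2, 3]) / (mass + 1e-5)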
github tensorflow / privacy / privacy / bolt_on / optimizers.py
False to check if weights > R-ball and only normalize then.

    Raises:
      Exception: If not called from inside this optimizer context.
    """
    if not self._is_init:
      raise Exception('This method must be called from within the optimizer\'s '
                      'context.')
    radius = self.loss.radius()
    for layer in self.layers:
      weight_norm = tf.norm(layer.kernel, axis=0)
      if force:
        layer.kernel = layer.kernel / (weight_norm / radius)
      else:
        layer.kernel = tf.cond(
            tf.reduce_sum(tf.cast(weight_norm > radius, dtype=self.dtype)) > 0,
            lambda k=layer.kernel, w=weight_norm, r=radius: k / (w / r),  # pylint: disable=cell-var-from-loop
            lambda k=layer.kernel: k  # pylint: disable=cell-var-from-loop
        )
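Here tf.reduce_sum counts how many weight columns fall outside the radius-R ball: the boolean comparison is cast to the dtype and summed, and tf.cond normalizes only when that count is positive. A small sketch of the counting idiom, with made-up values:

import tensorflow as tf

weight_norm = tf.constant([0.5, 2.0, 1.5, 0.1])
radius = 1.0

# Cast the boolean mask to float and sum it to count violations.
num_outside = tf.reduce_sum(tf.cast(weight_norm > radius, tf.float32))  # -> 2.0
needs_projection = num_outside > 0                                      # -> True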
github therne / relation-networks-tensorflow / train.py
    with sv.managed_session() as sess:
        data_info = DataInfo()
        params = HyperParams()

        model = RelationNetwork(data_info, params)
        sess.run(tf.global_variables_initializer())

        # Read and feed to the graph
        image, question, seq_len, answer = read_data(FLAGS.data_path)
        logit_op = model.infer(image, question, seq_len)
        loss_op = model.loss(logit_op, answer)

        with tf.name_scope('Accuracy'):
            # Count correct predictions (tf.reduce_mean over the cast values would give the accuracy rate)
            corrects = tf.equal(tf.argmax(logit_op, axis=1), answer)
            accuracy = tf.reduce_sum(tf.to_int32(corrects))


        logits, losses, acc = sess.run([logit_op, loss_op, accuracy])

        placeholders = {
            model.image: None,
            model.question: None,
            model.answer: None
        }
        losses, logits = sess.run([model.losses, model.logits], placeholders)
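Note that reduce_sum over the cast booleans yields the number of correct predictions rather than a rate; dividing by the batch size (or using tf.reduce_mean) gives the accuracy. A quick sketch of both variants, with invented labels and logits:

import tensorflow as tf

labels = tf.constant([1, 0, 2, 2])
logits = tf.constant([[0.1, 0.8, 0.1],
                      [0.7, 0.2, 0.1],
                      [0.2, 0.3, 0.5],
                      [0.9, 0.05, 0.05]])

corrects = tf.equal(tf.argmax(logits, axis=1), tf.cast(labels, tf.int64))
num_correct = tf.reduce_sum(tf.cast(corrects, tf.int32))     # -> 3
accuracy = tf.reduce_mean(tf.cast(corrects, tf.float32))     # -> 0.75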
github williamleif / GraphSAGE / graphsage / models.py
    def _loss(self):
        aff = tf.reduce_sum(tf.multiply(self.outputs1, self.outputs2), 1) + self.outputs2_bias
        neg_aff = tf.matmul(self.outputs1, tf.transpose(self.neg_outputs)) + self.neg_outputs_bias
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(aff), logits=aff)
        negative_xent = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(neg_aff), logits=neg_aff)
        loss = tf.reduce_sum(true_xent) + tf.reduce_sum(negative_xent)
        self.loss = loss / tf.cast(self.batch_size, tf.float32)
        tf.summary.scalar('loss', self.loss)
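The affinity aff is a per-pair dot product: elementwise multiply followed by reduce_sum over the feature axis, scored against positive (ones) and negative (zeros) labels. A stripped-down sketch of that negative-sampling loss; the tensor names, shapes, and the omitted bias terms are illustrative assumptions:

import tensorflow as tf

emb_u = tf.random.normal([16, 128])       # anchor node embeddings
emb_v = tf.random.normal([16, 128])       # positive neighbour embeddings
emb_neg = tf.random.normal([20, 128])     # negative samples

aff = tf.reduce_sum(emb_u * emb_v, axis=1)                 # (16,) positive scores
neg_aff = tf.matmul(emb_u, emb_neg, transpose_b=True)      # (16, 20) negative scores

true_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(aff), logits=aff)
neg_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(neg_aff), logits=neg_aff)
loss = (tf.reduce_sum(true_xent) + tf.reduce_sum(neg_xent)) / 16.0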
github dtak / hip-mdp-public / Qnetwork.py
		self.hidden2_units = hidden_layer_sizes[1]
		# Hidden layer 1
		self.h1_weights = tf.Variable(tf.truncated_normal([state_dims,self.hidden1_units], stddev=1.0/float(state_dims*self.hidden1_units)), name='weights')
		self.h1_biases = tf.Variable(tf.zeros([self.hidden1_units]))
		self.hidden1 = self.activation_fn(tf.matmul(self.s,self.h1_weights) + self.h1_biases)
		# Hidden layer 2
		self.h2_weights = tf.Variable(tf.truncated_normal([self.hidden1_units, self.hidden2_units], stddev=1.0/float(self.hidden1_units*self.hidden2_units)),name='weights')
		self.h2_biases = tf.Variable(tf.zeros([self.hidden2_units]))
		self.hidden2 = self.activation_fn(tf.matmul(self.hidden1,self.h2_weights) + self.h2_biases)
		# Output layer
		self.out_weights = tf.Variable(tf.truncated_normal([self.hidden2_units, num_actions], stddev=1.0/float(self.hidden2_units*num_actions)),name='weights')
		self.out_biases = tf.Variable(tf.zeros([num_actions]))
		self.output = tf.matmul(self.hidden2,self.out_weights) + self.out_biases
		self.predict = tf.argmax(self.output,1)
		self.curate_output = tf.reduce_sum(tf.multiply(self.action_array,self.output),1)
		self.td_error = self.next_Q - tf.reduce_sum(tf.multiply(self.action_array,self.output),1)
		# Define the loss according to the Bellman equation
		self.td_loss = 0.5*tf.square(self.td_error)

		self.loss = tf.reduce_sum(tf.multiply(self.importance_weights, self.td_loss))
		if clip > 0: # Train and backpropagate clipped errors

			def ClipIfNotNone(gradient):
				if gradient is None:
					return gradient
				return tf.clip_by_norm(gradient, clip)

			self.objective = tf.train.AdamOptimizer(learning_rate = self.learning_rate,epsilon=0.01) 
			self.grads_and_vars = self.objective.compute_gradients(self.loss)
			self.clipped_grads = [ (ClipIfNotNone(grad), var) for grad, var in self.grads_and_vars ]
			self.updateQ = self.objective.apply_gradients(self.clipped_grads)
		else: # Update the network according to gradient descent minimization of the weighted loss
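In this Q-network, reduce_sum with a one-hot action array selects the Q-value of the taken action, and a second reduce_sum forms the importance-weighted loss. A condensed sketch of the action-selection idiom, with invented Q-values and weights:

import tensorflow as tf

q_values = tf.constant([[1.0, 2.5, 0.3],
                        [0.2, 0.1, 1.8]])           # per-action Q-values
action_one_hot = tf.constant([[0., 1., 0.],
                              [0., 0., 1.]])        # actions that were taken
target_q = tf.constant([2.0, 1.5])

chosen_q = tf.reduce_sum(q_values * action_one_hot, axis=1)   # -> [2.5, 1.8]
td_error = target_q - chosen_q
td_loss = 0.5 * tf.square(td_error)
weights = tf.constant([1.0, 0.5])                   # importance-sampling weights
loss = tf.reduce_sum(weights * td_loss)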