How to use the tensorflow.constant function in tensorflow

To help you get started, we’ve selected a few tensorflow.constant examples, based on popular ways it is used in public projects.

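Before the project excerpts, here is a minimal, self-contained sketch of tf.constant itself. The variable names and values are illustrative only (they are not taken from the projects below), and the Session call assumes TensorFlow 1.x, which is what the excerpts use:

import tensorflow as tf

# dtype is inferred from the value unless given explicitly;
# a scalar value plus a shape argument fills the whole tensor with that value.
scalar = tf.constant(3.0)                          # float32 scalar
vector = tf.constant([1, 2, 3], dtype=tf.int64)    # explicit dtype
matrix = tf.constant(0.1, shape=[2, 3])            # 2x3 tensor filled with 0.1

# In TensorFlow 1.x the graph is only executed inside a Session.
with tf.Session() as sess:
    print(sess.run([scalar, vector, matrix]))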

github google-research / google-research / dataset_analysis / bert_classifier.py View on GitHub
        accuracies = tf.convert_to_tensor(accuracies, dtype=tf.float32)
        eval_dict["auc"] = tf.metrics.mean(values=auc_vals)
        eval_dict["auc_weighted"] = tf.metrics.mean(
            values=auc_vals, weights=weights)
        eval_dict["accuracy"] = tf.metrics.mean(values=accuracies)
        eval_dict["accuracy_weighted"] = tf.metrics.mean(
            values=accuracies, weights=weights)

        # Calculate sentiment-based performance
        get_relation_based_scores(label_ids, pred_ind,
                                  tf.constant(sentiment_groups, dtype=tf.int64),
                                  "sentiment")

        # Calculate target-intensity based performance
        get_relation_based_scores(label_ids, pred_ind,
                                  tf.constant(intensity_groups, dtype=tf.int64),
                                  "target_intensity")
github amygdala / tensorflow-workshop / workshop_sections / mnist_series / mnist_cnn_custom_estimator / trainer / task.py View on GitHub
def _input_fn():
    X = tf.constant(dataset.images)
    Y = tf.constant(dataset.labels, dtype=tf.int32)
    image_batch, label_batch = tf.train.shuffle_batch(
        [X, Y],
        batch_size=batch_size,
        capacity=8 * batch_size,
        min_after_dequeue=4 * batch_size,
        enqueue_many=True)
    return {'x': image_batch}, label_batch
github tensorflow / models / research / object_detection / metrics / coco_evaluation.py View on GitHub
      detection_boxes = tf.expand_dims(detection_boxes, 0)
      detection_scores = tf.expand_dims(detection_scores, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)

      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
      else:
        num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)

      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
      else:
        num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)

      if is_annotated is None:
        is_annotated = tf.constant([True])
      else:
        is_annotated = tf.expand_dims(is_annotated, 0)
    else:
      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.tile(
            tf.shape(groundtruth_boxes)[1:2],
            multiples=tf.shape(groundtruth_boxes)[0:1])
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.tile(
            tf.shape(detection_boxes)[1:2],
            multiples=tf.shape(detection_boxes)[0:1])
      if is_annotated is None:
        is_annotated = tf.ones_like(image_id, dtype=tf.bool)

    update_op = tf.py_func(update_op, [image_id,
                                       groundtruth_boxes,
github adrianyoung / TextSumma / textsum_model.py View on GitHub
def sigmoid_norm(self, score):
        # score is a tanh output in [-1, 1]; sigmoid maps it to [0.26, 0.73],
        # which is then rescaled to [0, 1]
        with tf.name_scope("sigmoid_norm"):
            Min = tf.sigmoid(tf.constant(-1, dtype=tf.float32))
            Max = tf.sigmoid(tf.constant(1, dtype=tf.float32))
            prob = tf.sigmoid(score)
            prob_norm = (prob - Min) / (Max - Min)
        return prob_norm
github turiphro / deeplearning / src / tensorflow / helloworld.py View on GitHub
# Hello World app for TensorFlow

# Notes:
# - TensorFlow is written in C++ with good Python (and other) bindings.
#   It runs in a separate thread (Session).
# - TensorFlow is fully symbolic: nothing is computed while you define
#   operations; the whole graph is executed at once inside a Session.
#   This makes it scalable on multiple CPUs/GPUs, allows for some math
#   optimisations, and means derivatives can be calculated automatically
#   (handy for SGD).

import tensorflow as tf

# define the graph
M1 = tf.constant([[3., 3.]])
M2 = tf.constant([[2.], [2.]])
M3 = tf.matmul(M1, M2) # symbolic: no calculation yet, all happens at once outside of Python (in GPU, on network, etc)

# start a session to compute the graph
with tf.Session() as sess: # runs on GPU first
    #with tf.device("/gpu:1"): # explicitly choose if you have multiple GPUs
    #with tf.device("grpc://host:2222"): # explicitly choose host with running TensorFlow server
    result = sess.run(M3) # runs subsection of total graph
    print(result) # [[12.]]

state = tf.Variable(0, name='counter')  # maintains state along Session
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)    # again symbolic
init_op = tf.initialize_all_variables() # makes operator; does not run anything yet
github ETIP-team / ETIP-Project / CNN-SW / model.py View on GitHub
def bias_variable(shape, name):
    # Fill a tensor of the given shape with the constant 0.1 and wrap it in a trainable Variable.
    initial = tf.constant(0.1, shape=shape, name=name)
    return tf.Variable(initial)
github MarvinTeichmann / KittiSeg / model / segmentation / kitti_obj.py View on GitHub
def loss(hypes, decoded_logits, labels):
    """Calculate the loss from the logits and the labels.

    Args:
      hypes: Dict of hyperparameters (uses hypes['solver']['epsilon'] and hypes['arch']['weight']).
      decoded_logits: Dict holding the logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    logits = decoded_logits['logits']
    with tf.name_scope('loss'):
        logits = tf.reshape(logits, (-1, 2))
        shape = [logits.get_shape()[0], 2]
        epsilon = tf.constant(value=hypes['solver']['epsilon'])
        logits = logits + epsilon
        labels = tf.to_float(tf.reshape(labels, (-1, 2)))

        softmax = tf.nn.softmax(logits)
        head = hypes['arch']['weight']
        cross_entropy = -tf.reduce_sum(tf.mul(labels * tf.log(softmax), head),
                                       reduction_indices=[1])

        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='xentropy_mean')
        tf.add_to_collection('losses', cross_entropy_mean)

        total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

        losses = {}
        losses['total_loss'] = total_loss
github xdever / RFCN-tensorflow / BoxEngine / BoxRefinementNetwork.py View on GitHub
						return tf.cond(tf.shape(allLoss)[0]>0, lambda: tf.reduce_mean(Utils.MultiGather.gatherTopK(allLoss, self.nTrainBoxes)), lambda: tf.constant(0.0))
					else:
github SeldonIO / alibi / alibi / explainers / cfproto.py View on GitHub
        with tf.name_scope('loss_ae') as scope:
            # gamma * AE loss
            if self.ae_model:
                # run autoencoder
                self.adv_ae = self.ae(self.adv_cat)
                self.adv_ae_s = self.ae(self.adv_cat_s)
                if self.is_cat:  # map output autoencoder back to numerical values
                    self.adv_ae = apply_map(self.adv_ae, to_num=True)
                    self.adv_ae_s = apply_map(self.adv_ae_s, to_num=True)
                # compute loss
                self.loss_ae = self.gamma * tf.square(tf.norm(self.adv_ae - self.adv))
                self.loss_ae_s = self.gamma * tf.square(tf.norm(self.adv_ae_s - self.adv_s))
            else:  # no auto-encoder available
                self.loss_ae = tf.constant(0.)
                self.loss_ae_s = tf.constant(0.)

        with tf.name_scope('loss_attack') as scope:
            if not self.model:
                self.loss_attack = tf.placeholder(tf.float32)
            elif self.c_init == 0. and self.c_steps == 1:  # prediction loss term not used
                # make predictions on perturbed instance
                self.pred_proba = self.predict(self.adv_cat)
                self.pred_proba_s = self.predict(self.adv_cat_s)

                self.loss_attack = tf.constant(0.)
                self.loss_attack_s = tf.constant(0.)
            else:
                # make predictions on perturbed instance
                self.pred_proba = self.predict(self.adv_cat)
                self.pred_proba_s = self.predict(self.adv_cat_s)
github MaybeShewill-CV / lanenet-lane-detection / lanenet_model / lanenet_merge_model.py View on GitHub
            # Compute the binary segmentation loss
            decode_logits = inference_ret['logits']
            binary_seg_ret = tf.nn.softmax(logits=decode_logits)
            binary_seg_ret = tf.argmax(binary_seg_ret, axis=-1)
            # Compute the pixel embeddings
            decode_deconv = inference_ret['deconv']
            # Pixel embedding
            pix_embedding = self.conv2d(inputdata=decode_deconv, out_channel=4, kernel_size=1,
                                        use_bias=False, name='pix_embedding_conv')
            pix_embedding = self.relu(inputdata=pix_embedding, name='pix_embedding_relu')

            return binary_seg_ret, pix_embedding


if __name__ == '__main__':
    model = LaneNet(tf.constant('train', dtype=tf.string))
    input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 256, 512, 3], name='input')
    binary_label = tf.placeholder(dtype=tf.int64, shape=[1, 256, 512, 1], name='label')
    instance_label = tf.placeholder(dtype=tf.float32, shape=[1, 256, 512, 1], name='label')
    ret = model.compute_loss(input_tensor=input_tensor, binary_label=binary_label,
                             instance_label=instance_label, name='loss')
    for vv in tf.trainable_variables():
        if 'bn' in vv.name:
            continue
        print(vv.name)