How to use the tf.cast function in TensorFlow

To help you get started, we’ve selected a few tf.cast examples that show popular ways the function is used in public projects.

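Before the project snippets, here is a minimal, self-contained sketch of what tf.cast does. The tensor names and values below are illustrative, not taken from any of the projects:

import tensorflow as tf

scores = tf.constant([0.3, 0.8, 0.5])     # float32
flags = scores > 0.5                      # bool: [False, True, False]
as_float = tf.cast(flags, tf.float32)     # float32: [0., 1., 0.]
as_int = tf.cast(scores * 10, tf.int32)   # float-to-int casts truncate: [3, 8, 5]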

github TianzhongSong / Tensorflow-quantization-test / utils / layers.py
def denselayer(x, w, b, weight_scale=0., activation=''):
    # quantize the activations, then cast back to float32 so the matmul
    # runs in floating point over the quantized integer values
    x, sx = quantize(x)
    x = tf.cast(x, dtype=tf.float32)
    x = tf.matmul(x, w)
    # rescale the result by the combined activation and weight scales
    s = sx * weight_scale
    x = x * s
    x = tf.add(x, b)
    if activation == 'relu':
        x = tf.nn.relu(x)
    return x
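
The quantize helper above is defined elsewhere in that repository and is not shown on this page. Purely for illustration, a symmetric quantizer compatible with the snippet (returning the quantized tensor together with its scale factor) might look roughly like this; the real implementation may differ:

def quantize(x, num_bits=8):
    # illustrative sketch, not the repository's code: map x into the signed
    # integer range, round, and return the integer tensor plus its scale
    qmax = 2 ** (num_bits - 1) - 1                        # 127 for int8
    scale = tf.reduce_max(tf.abs(x)) / qmax
    q = tf.cast(tf.round(x / tf.maximum(scale, 1e-8)), tf.int8)
    return q, scale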

github GoogleCloudPlatform / training-data-analyst / courses / machine_learning / asl / open_project / time_series_anomaly_detection / tf_anomaly_detection_model_selection / anomaly_detection_module / trainer / input.py
    features = convert_sequences_from_strings_to_floats(
        features=features,
        column_list=params["feat_names"],
        seq_len=params["seq_len"])

    return features
  else:
    # For subset of CSV files that DO have labels
    columns = tf.decode_csv(
        records=value_column,
        record_defaults=params["feat_defaults"] + [[0.0]],  # add label default
        field_delim=",")

    features = dict(zip(params["feat_names"] + ["anomalous_sequence_flag"], columns))

    labels = tf.cast(x=features.pop("anomalous_sequence_flag"), dtype=tf.float64)

    features = convert_sequences_from_strings_to_floats(
        features=features,
        column_list=params["feat_names"],
        seq_len=params["seq_len"])

    return features, labels

github sirius-ai / MobileFaceNet_TF / losses / face_losses.py
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keepdims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos(theta+m)
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t2 = tf.square(cos_t, name='cos_2')
        sin_t2 = tf.subtract(1., cos_t2, name='sin_2')
        sin_t = tf.sqrt(sin_t2, name='sin_t')
        cos_mt = s * tf.subtract(tf.multiply(cos_t, cos_m), tf.multiply(sin_t, sin_m), name='cos_mt')

        # this condition keeps theta + m in the range [0, pi]:
        #      0 <= theta + m <= pi
        #     -m <= theta     <= pi - m
        cond_v = cos_t - threshold
        # relu zeroes out negative values, so the cast to bool yields
        # True where cos(theta) > threshold and False elsewhere
        cond = tf.cast(tf.nn.relu(cond_v, name='if_else'), dtype=tf.bool)

        keep_val = s*(cos_t - mm)
        cos_mt_temp = tf.where(cond, cos_mt, keep_val)

        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')
        # mask = tf.squeeze(mask, 1)
        inv_mask = tf.subtract(1., mask, name='inverse_mask')

        s_cos_t = tf.multiply(s, cos_t, name='scalar_cos_t')

        logit = tf.add(tf.multiply(s_cos_t, inv_mask), tf.multiply(cos_mt_temp, mask), name='arcface_loss_output')
        inference_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=labels))

    return inference_loss, logit

github SJTU-Thinklab-Det / R3Det_Tensorflow / libs / networks / build_whole_network_r3det_efficientnet.py
    def make_anchors(self, feature_pyramid):
        with tf.variable_scope('make_anchors'):
            anchor_list = []
            level_list = cfgs.LEVEL
            with tf.name_scope('make_anchors_all_level'):
                for level, base_anchor_size, stride in zip(level_list, cfgs.BASE_ANCHOR_SIZE_LIST, cfgs.ANCHOR_STRIDE):
                    '''
                    (level, base_anchor_size) tuple:
                    (P3, 32), (P4, 64), (P5, 128), (P6, 256), (P7, 512)
                    '''
                    featuremap_height, featuremap_width = tf.shape(feature_pyramid[level])[1], \
                                                          tf.shape(feature_pyramid[level])[2]

                    featuremap_height = tf.cast(featuremap_height, tf.float32)
                    featuremap_width = tf.cast(featuremap_width, tf.float32)

                    if self.method == 'H':
                        tmp_anchors = tf.py_func(generate_anchors.generate_anchors_pre,
                                                 inp=[featuremap_height, featuremap_width, stride,
                                                      np.array(cfgs.ANCHOR_SCALES) * stride, cfgs.ANCHOR_RATIOS, 4.0],
                                                 Tout=[tf.float32])

                        tmp_anchors = tf.reshape(tmp_anchors, [-1, 4])
                    else:
                        tmp_anchors = generate_rotate_anchors.make_anchors(base_anchor_size=base_anchor_size,
                                                                           anchor_scales=cfgs.ANCHOR_SCALES,
                                                                           anchor_ratios=cfgs.ANCHOR_RATIOS,
                                                                           anchor_angles=cfgs.ANCHOR_ANGLES,
                                                                           featuremap_height=featuremap_height,
                                                                           featuremap_width=featuremap_width,
                                                                           # snippet truncated here in the source
                                                                           )

github deepmipt / DeepPavlov / deeppavlov / models / coreference_resolution / new_model.py
            char_index: [Amount of words, Amount of chars in word (max len), char_embedding_size],
                tf.int32, Character indices.
            text_len: tf.int32, [Amount of sentences]
            speaker_ids: [Amount of independent speakers], tf.int32, Speaker IDs.
            genre: [Amount of independent genres], tf.int32, Genre
            is_training: tf.bool
            gold_starts: tf.int32, [Amount of gold mentions]
            gold_ends: tf.int32, [Amount of gold mentions]
            cluster_ids: tf.int32, [Amount of independent clusters]

        Returns:
            [candidate_starts, candidate_ends, candidate_mention_scores, mention_starts,
             mention_ends, antecedents, antecedent_scores], loss
            A list of predictions and scores, and the value of the loss function.
        """
        # casting is_training (bool) to float turns dropout on during training
        # (keep_prob = 1 - rate) and off at inference (keep_prob = 1)
        self.dropout = 1 - (tf.cast(is_training, tf.float64) * self.dropout_rate)
        self.lexical_dropout = 1 - (tf.cast(is_training, tf.float64) * self.lexical_dropout_rate)

        num_sentences = tf.shape(word_emb)[0]
        max_sentence_length = tf.shape(word_emb)[1]

        text_emb_list = [word_emb]

        if self.char_embedding_size > 0:
            char_emb = tf.gather(
                tf.get_variable("char_embeddings", [len(self.char_dict), self.char_embedding_size]),
                char_index)  # [num_sentences, max_sentence_length, max_word_length, emb]
            flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length,
                                                       custom_layers.shape(char_emb, 2),
                                                       custom_layers.shape(char_emb, 3)])
            # [num_sentences * max_sentence_length, max_word_length, emb]

            flattened_aggregated_char_emb = custom_layers.cnn(flattened_char_emb, self.filter_widths,
                                                              # remaining arguments truncated in the source snippet
                                                              )

github wenhuchen / Variational-Vocabulary-Selection / models / models.py
        self.predictions = tf.argmax(self.logits, -1, output_type=tf.int32)

        self.sparsity = tf.constant(0.0, dtype=tf.float32)
        self.reg_loss = tf.constant(0., dtype=tf.float32)

        # ============= Loss and Accuracy =============
        with tf.name_scope("loss"):
            #self.y_one_hot = tf.one_hot(self.y, num_class)
            self.cross_entropy = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y))
            self.loss = self.cross_entropy + self.reg_loss
            self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step)

        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, self.y)
            # cast the booleans to floats so the mean is the fraction correct
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")

github zzh142857 / SiameseFC-tf / src / prepare_training_dataset.py
    # decode the exemplar (z) and search (x) images
    z = tf.cast(tf.decode_raw(features['z_raw'], tf.uint8), tf.float32)  # shape: (h, w, c)
    z = tf.reshape(z, [resize_width, resize_height, channel])
    x = tf.cast(tf.decode_raw(features['x_raw'], tf.uint8), tf.float32)
    x = tf.reshape(x, [resize_width, resize_height, channel])
    # normalize pixel values from [0, 255] to [-0.5, 0.5]
    x = x * (1. / 255) - 0.5
    z = z * (1. / 255) - 0.5

    # coordinates: scale the normalized positions/sizes to pixels and cast to int32
    # (everything is scaled by resize_width, which presumably assumes square inputs)
    z_pos_x = tf.cast(features['z_pos_x'] * resize_width, tf.int32)
    z_pos_y = tf.cast(features['z_pos_y'] * resize_width, tf.int32)
    z_target_w = tf.cast(features['z_target_w'] * resize_width, tf.int32)
    z_target_h = tf.cast(features['z_target_h'] * resize_width, tf.int32)
    x_pos_x = tf.cast(features['x_pos_x'] * resize_width, tf.int32)
    x_pos_y = tf.cast(features['x_pos_y'] * resize_width, tf.int32)
    x_target_w = tf.cast(features['x_target_w'] * resize_width, tf.int32)
    x_target_h = tf.cast(features['x_target_h'] * resize_width, tf.int32)

    return z, x, z_pos_x, z_pos_y, z_target_w, z_target_h, x_pos_x, x_pos_y, x_target_w, x_target_h

github yanx27 / DuReader_QANet_BiDAF / layers / OurLayer.py
def mask_logits(inputs, mask, mask_value=-1e30):
    shapes = inputs.shape.as_list()
    # cast the boolean mask to float so it can be combined arithmetically:
    # kept positions pass through, masked positions are pushed to ~ -1e30
    mask = tf.cast(mask, tf.float32)
    return inputs * mask + mask_value * (1 - mask)
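
A typical way such a helper is used, sketched here with made-up shapes rather than code from the repository: the boolean padding mask is cast to float inside mask_logits, so masked positions receive a large negative logit and effectively zero probability after the softmax:

# hypothetical usage of mask_logits with a padding mask
logits = tf.random_normal([2, 5])            # [batch, seq_len]
mask = tf.sequence_mask([3, 5], maxlen=5)    # bool: True at real token positions
masked = mask_logits(logits, mask)           # padded slots pushed to ~ -1e30
probs = tf.nn.softmax(masked, axis=-1)       # padding gets ~0 probability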

github tf-encrypted / tf-encrypted / examples / logistic / common.py
    def print_accuracy(y_hat, y) -> tf.Operation:
      with tf.name_scope("print-accuracy"):
        # cast the boolean comparison to float32 so reduce_mean yields accuracy
        correct_prediction = tf.equal(tf.round(y_hat), y)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # data_owner comes from the enclosing scope in the original file
        print_op = tf.print("Accuracy on {}:".format(data_owner.player_name),
                            accuracy)
        return print_op