How to use the tensorflow.get_variable function in tensorflow

To help you get started, we’ve selected a few tensorflow.get_variable examples based on popular ways it is used in public projects.

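tf.get_variable is the TensorFlow 1.x graph-mode way to create or retrieve a named variable: the first call in a scope creates the variable with the given shape and initializer, and a later call with the same name returns the existing variable when the enclosing tf.variable_scope permits reuse. A minimal sketch of that behaviour (the names and shapes below are illustrative, not taken from the projects that follow):

import tensorflow as tf  # TensorFlow 1.x graph-mode API

with tf.variable_scope("layer", reuse=tf.AUTO_REUSE):
    # Created on the first call; a second get_variable("weights", ...) inside
    # this scope returns the same variable instead of allocating a new one.
    weights = tf.get_variable(
        "weights", shape=[128, 64],
        initializer=tf.truncated_normal_initializer(stddev=0.02))
    bias = tf.get_variable("bias", shape=[64], initializer=tf.zeros_initializer())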

github jiangxinyang227 / NLP-Project / few_shot_learning / relation_network / model.py View on Github external
def neural_tensor_layer(self, class_vector, query_encoder):
    """
    Calculate relation scores.
    :param class_vector: class vectors
    :param query_encoder: query set encoding matrix. [num_classes * num_queries, encode_size]
    :return:
    """
    num_classes = self.config["num_classes"]
    encode_size = self.config["hidden_sizes"][-1] * 2
    layer_size = self.config["layer_size"]

    M = tf.get_variable("M", [encode_size, encode_size, layer_size], dtype=tf.float32,
                        initializer=tf.truncated_normal_initializer(stddev=(2 / encode_size) ** 0.5))

    # This layer can be seen as scoring the class vectors against the query vectors
    # from several different "views" (one slice of M per view).
    # [[class1, class2, ..], [class1, class2, ..], ... layer_size]
    all_mid = []
    for i in range(layer_size):
        # [num_classes, num_classes * num_queries]
        slice_mid = tf.matmul(tf.matmul(class_vector, M[:, :, i]), query_encoder, transpose_b=True)
        all_mid.append(tf.split(slice_mid, [1] * num_classes, axis=0))

    # [[1, 2, .. layer_size], ... class_n]; group the scores computed by the tensor
    # layer for the same class together.
    all_mid = [[mid[j] for mid in all_mid] for j in range(len(all_mid[0]))]

    # [layer_size, num_classes * num_queries]
    all_mid_concat = [tf.concat(mid, axis=0) for mid in all_mid]
github openai / ebm_code_release / ebm_sandbox.py View on Github external
def construct_label(weights, X, Y, Y_GT, model, target_vars):
    # for i in range(FLAGS.num_steps):
    #     Y = Y + tf.random_normal(tf.shape(Y), mean=0.0, stddev=0.03)
    #     e = model.forward(X, weights, label=Y)

    #     Y_grad = tf.clip_by_value(tf.gradients(e, [Y])[0],  -1, 1)
    #     Y = Y - 0.1 * Y_grad
    #     Y = tf.clip_by_value(Y, 0, 1)

    #     Y = Y / tf.reduce_sum(Y, axis=[1], keepdims=True)

    e_bias = tf.get_variable('e_bias', shape=10, initializer=tf.initializers.zeros())
    l1_norm = tf.placeholder(shape=(), dtype=tf.float32)
    l2_norm = tf.placeholder(shape=(), dtype=tf.float32)

    def compute_logit(X, stop_grad=False, num_steps=0):
        batch_size = tf.shape(X)[0]
        X = tf.reshape(X, (batch_size, 1, 32, 32, 3))
        X = tf.reshape(tf.tile(X, (1, 10, 1, 1, 1)), (batch_size * 10, 32, 32, 3))
        Y_new = tf.reshape(Y, (batch_size*10, 10))

        X_min = X - 8 / 255.
        X_max = X + 8 / 255.

        for i in range(num_steps):
            X = X + tf.random_normal(tf.shape(X), mean=0.0, stddev=0.005)

            energy_noise = model.forward(X, weights, label=Y, reuse=True)
github Pinafore / qb / qanta / guesser / experimental / binarized.py View on Github external
        self.i_to_class = i_to_class
        self.cached_wikipedia = None
        self.question_wiki_similarity = None

        word_embeddings, word_embedding_lookup = load_embeddings()
        self.np_word_embeddings = word_embeddings
        self.embedding_lookup = word_embedding_lookup
        word_embeddings = tf.get_variable(
            'word_embeddings',
            initializer=tf.constant(word_embeddings, dtype=tf.float32)
        )
        self.word_embeddings = tf.pad(word_embeddings, [[0, 1], [0, 0]], mode='CONSTANT')

        self.word_dropout_keep_prob_var = tf.get_variable(
            'word_dropout_keep_prob', (), dtype=tf.float32, trainable=False)
        self.nn_dropout_keep_prob_var = tf.get_variable('nn_dropout_keep_prob', (), dtype=tf.float32, trainable=False)
        self.is_training = tf.placeholder(tf.bool, name='is_training')

        self.qb_questions = tf.placeholder(
            tf.int32,
            shape=(self.batch_size, self.question_max_length),
            name='question_input'
        )
        self.question_lengths = tf.placeholder(tf.float32, shape=self.batch_size, name='question_lengths')

        self.wiki_data = tf.placeholder(
            tf.int32,
            shape=(self.batch_size, self.wiki_max_length),
            name='wiki_data_input'
        )
        self.wiki_lengths = tf.placeholder(tf.float32, shape=self.batch_size, name='wiki_data_lengths')
github EliasCai / bert-toxicity-classification / run_classifier.py View on Github external
    # 768 hidden size
    # 128 seq length
    # 32 batch size
    output_layer = model.get_pooled_output()

    hidden_size = output_layer.shape[-1].value

    output_weights = tf.get_variable(
        "output_weights",
        [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02),
    )

    output_bias = tf.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer()
    )

    with tf.variable_scope("loss"):
        if is_training:
            # I.e., 0.1 dropout
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        probabilities = tf.nn.softmax(logits, axis=-1)
        log_probs = tf.nn.log_softmax(logits, axis=-1)

        # correct_predictions = tf.equal(log_probs, labels)
        # accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
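The excerpt stops after computing log_probs; as a hedged sketch (not code from this repository), logits like these are typically turned into a classification loss by one-hot encoding the labels and averaging the negative log-likelihood:

# Assumes a `labels` tensor of shape [batch_size] holding integer class ids.
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)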
github thuxugang / doudizhu / rl / prioritized_dqn_max.py View on Github external
def build_layers(s, c_names, w_initializer, b_initializer):
    with tf.variable_scope('l1'):
        w1 = tf.get_variable('w1', [self.n_features, self.n_l1], initializer=w_initializer, collections=c_names)
        b1 = tf.get_variable('b1', [1, self.n_l1], initializer=b_initializer, collections=c_names)
        l1 = tf.nn.relu(tf.matmul(s, w1) + b1)
        # l1 = tf.maximum(0.2 * l1, l1)

    with tf.variable_scope('l2'):
        w2 = tf.get_variable('w2', [self.n_l1, self.n_l2], initializer=w_initializer, collections=c_names)
        b2 = tf.get_variable('b2', [1, self.n_l2], initializer=b_initializer, collections=c_names)
        l2 = tf.nn.relu(tf.matmul(l1, w2) + b2)
        # l2 = tf.maximum(0.2 * l2, l2)

    with tf.variable_scope('l3'):
        w3 = tf.get_variable('w3', [self.n_l2, self.n_actions], initializer=w_initializer, collections=c_names)
        b3 = tf.get_variable('b3', [1, self.n_actions], initializer=b_initializer, collections=c_names)
        out = tf.matmul(l2, w3) + b3
    return out, b3
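In build_layers above, collections=c_names registers every weight and bias in custom graph collections; in the DQN pattern this is what lets the eval network's parameters be copied into the target network later. A hedged sketch of that copy step, assuming collection names such as 'eval_net_params' and 'target_net_params' (illustrative, not taken from the repository):

# Fetch the variables that get_variable placed into each collection and
# build assign ops that overwrite the target network with the eval network.
eval_params = tf.get_collection('eval_net_params')
target_params = tf.get_collection('target_net_params')
replace_target_ops = [tf.assign(t, e) for t, e in zip(target_params, eval_params)]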
github praveendareddy21 / Stacked_LSTMS_Highway_Residual_On_TimeSeries_Datasets / highway_lstm_model_UCR_dataset.py View on Github external
def relu_fc(input_2D_tensor_list, features_len, new_features_len, config):
    """Make a ReLU fully-connected layer; mainly changes the shape of the tensors.
       Both the input and the output are lists of tensors.
        argument:
            input_2D_tensor_list: list of tensors of shape [batch_size, feature_num]
            features_len: int, the initial feature length of input_2D_tensor
            new_features_len: int, the final feature length of output_2D_tensor
            config: Config used for the weights initializers
        return:
            output_2D_tensor_list: list of tensors of shape [batch_size, new_features_len]
    """

    W = tf.get_variable(
        "relu_fc_weights",
        initializer=tf.random_normal(
            [features_len, new_features_len],
            mean=0.0,
            stddev=float(config.weights_stddev)
        )
    )
    b = tf.get_variable(
        "relu_fc_biases_noreg",
        initializer=tf.random_normal(
            [new_features_len],
            mean=float(config.bias_mean),
            stddev=float(config.weights_stddev)
        )
    )
github qweas120 / PSVH-3d-reconstruction / run_case.py View on Github external
        pool4 = tf.nn.max_pool(conv3b, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        shortcuts.append(pool4)
        layer_id += 1

        conv4 = encoder_residual_block(pool4, layer_id, 2, 256)
        pool5 = tf.nn.max_pool(conv4, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        shortcuts.append(pool5)
        layer_id += 1

        conv5 = encoder_residual_block(pool5, layer_id, 2, 256)
        pool6 = tf.nn.max_pool(conv5, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')

        feature_map = pool6

        pool6 = tf.reduce_mean(pool6, [1, 2])
        wfc = tf.get_variable("wfc", shape=[256, 1024], initializer=tf.contrib.layers.xavier_initializer())
        feature = tf.matmul(pool6, wfc)

        w_e = tf.get_variable("w_euler", shape=[1024, 3], initializer=tf.contrib.layers.xavier_initializer())
        euler_angle = tf.matmul(feature, w_e)

        w_st = tf.get_variable('w_ft', shape=[1024, 3], initializer=tf.contrib.layers.xavier_initializer())
        st = tf.matmul(feature, w_st)

        print('pool1', pool1)
        print('pool2', pool2)
        print('pool3', pool3)
        print('pool4', pool4)
        print('pool5', pool5)
        print('pool6', pool6)
        print('feature', feature)
        print('feature_map', feature_map)
github shoaibahmed / FCN-TensorFlow / legacy / models / inception_resnet_v2_fcn_8s.py View on Github external
def _bias_variable(shape, constant=0.0):
    initializer = tf.constant_initializer(constant)
    return tf.get_variable(name='biases', shape=shape,
                           initializer=initializer)
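Because _bias_variable always asks for the fixed name 'biases', each call has to live under its own tf.variable_scope, otherwise the second call raises a ValueError for a duplicate variable. A hedged usage sketch (the scope names are illustrative):

with tf.variable_scope('conv1'):
    b1 = _bias_variable([64])          # creates conv1/biases
with tf.variable_scope('conv2'):
    b2 = _bias_variable([128], 0.1)    # creates conv2/biases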
github taosir / cnn_handwritten_chinese_recognition / app / views.py View on Github external
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')

    conv_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv1')  # image_size 62x62
    max_pool_1 = slim.max_pool2d(conv_1, [2, 2], [2, 2], padding='SAME')      # image_size 31x31
    conv_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv2')   # image_size 29x29
    max_pool_2 = slim.max_pool2d(conv_2, [2, 2], [2, 2], padding='SAME')      # image_size 15x15
    conv_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3')      # image_size 13x13
    max_pool_3 = slim.max_pool2d(conv_3, [2, 2], [2, 2], padding='SAME')      # image_size 7x7

    flatten = slim.flatten(max_pool_3)
    fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024, activation_fn=tf.nn.tanh, scope='fc1')  # tanh activation
    logits = slim.fully_connected(slim.dropout(fc1, keep_prob), __chinese_word_count, activation_fn=None, scope='fc2')  # no activation
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))  # softmax cross-entropy loss
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))  # compute accuracy

    global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
    rate = tf.train.exponential_decay(2e-4, global_step, decay_steps=2000, decay_rate=0.97, staircase=True)  # exponentially decayed learning rate
    train_op = tf.train.AdamOptimizer(learning_rate=rate).minimize(loss, global_step=global_step)  # train the model with Adam (SGD with an adaptive learning rate)
    probabilities = tf.nn.softmax(logits)

    tf.summary.scalar('loss', loss)
    tf.summary.scalar('accuracy', accuracy)
    merged_summary_op = tf.summary.merge_all()
    predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
    accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))

    return {
        'images': images,
        'labels': labels,
        'keep_prob': keep_prob,
        'top_k': top_k,
        'global_step': global_step,