How to use the tensorflow.Variable class in TensorFlow

To help you get started, we’ve selected a few tensorflow.Variable examples, based on popular ways it is used in public projects.

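Before the project snippets, here is a minimal, self-contained sketch of creating and updating variables (assuming TensorFlow 2.x with eager execution; the names, shapes, and initializer values below are illustrative only). Most of the snippets further down target TensorFlow 1.x, where tf.Variable takes the same arguments but is typically created inside a graph and run in a session.

import tensorflow as tf

# Create a trainable variable from an initial-value tensor.
weights = tf.Variable(
    initial_value=tf.random.normal([4, 4], stddev=0.02),
    name='weights',
    dtype=tf.float32,
    trainable=True,
)

# Variables are updated in place with assign-style ops rather than rebound.
step = tf.Variable(0, dtype=tf.int32, trainable=False, name='step')
step.assign_add(1)             # step is now 1
weights.assign(weights * 0.5)  # halve the weights in place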

github solgaardlab / neurophox / neurophox / initializers.py
    def to_tf(self, phase_varname: str) -> tf.Variable:
        """Convert the numpy phase representation into a TensorFlow variable.

        Returns:
            Initialized TensorFlow variable.
        """
        phase_np = self.to_np()
        return tf.Variable(
            name=phase_varname,
            initial_value=phase_np,
            dtype=TF_FLOAT
        )
github HyeongminLEE / Tensorflow_Pix2Pix / model.py
        self.G_W9 = tf.Variable(tf.truncated_normal([4, 4, self.ch_G9, self.ch_G8], stddev=0.02), name='G_W9')
        self.G_bn9 = batch_norm(name="G_bn9")

        self.G_W10 = tf.Variable(tf.truncated_normal([4, 4, self.ch_G10, self.ch_G9 + self.ch_G7], stddev=0.02), name='G_W10')
        self.G_bn10 = batch_norm(name="G_bn10")

        self.G_W11 = tf.Variable(tf.truncated_normal([4, 4, self.ch_G11, self.ch_G10 + self.ch_G6], stddev=0.02), name='G_W11')
        self.G_bn11 = batch_norm(name="G_bn11")

        self.G_W12 = tf.Variable(tf.truncated_normal([4, 4, self.ch_G12, self.ch_G11 + self.ch_G5], stddev=0.02), name='G_W12')
        self.G_bn12 = batch_norm(name="G_bn12")

        self.G_W13 = tf.Variable(tf.truncated_normal([4, 4, self.ch_G13, self.ch_G12 + self.ch_G4], stddev=0.02), name='G_W13')
        self.G_bn13 = batch_norm(name="G_bn13")

        self.G_W14 = tf.Variable(tf.truncated_normal([4, 4, self.ch_G14, self.ch_G13 + self.ch_G3], stddev=0.02), name='G_W14')
        self.G_bn14 = batch_norm(name="G_bn14")

        self.G_W15 = tf.Variable(tf.truncated_normal([4, 4, self.ch_G15, self.ch_G14 + self.ch_G2], stddev=0.02), name='G_W15')
        self.G_bn15 = batch_norm(name="G_bn15")

        self.G_W16 = tf.Variable(tf.truncated_normal([4, 4, self.ch_G16, self.ch_G15 + self.ch_G1], stddev=0.02), name='G_W16')

        # Discriminator weights
        self.D_W1 = tf.Variable(tf.truncated_normal([4, 4, self.ch_D0, self.ch_D1], stddev=0.02), name='D_W1')
        self.D_bn1 = batch_norm(name="D_bn1")

        self.D_W2 = tf.Variable(tf.truncated_normal([4, 4, self.ch_D1, self.ch_D2], stddev=0.02), name='D_W2')
        self.D_bn2 = batch_norm(name="D_bn2")

        self.D_W3 = tf.Variable(tf.truncated_normal([4, 4, self.ch_D2, self.ch_D3], stddev=0.02), name='D_W3')
        self.D_bn3 = batch_norm(name="D_bn3")
github art-programmer / PlaneNet / code / train_planenet_confidence.py
        loss = tf.cond(tf.less(training_flag, 2), lambda: loss, lambda: loss_rgbd)
        pass

    
    # train_writer = tf.summary.FileWriter(options.log_dir + '/train')
    # val_writer = tf.summary.FileWriter(options.log_dir + '/val')
    # train_writer_rgbd = tf.summary.FileWriter(options.log_dir + '/train_rgbd')
    # val_writer_rgbd = tf.summary.FileWriter(options.log_dir + '/val_rgbd')
    # writers = [train_writer, val_writer, train_writer_rgbd, val_writer_rgbd]
    
    tf.summary.scalar('loss', loss)
    summary_op = tf.summary.merge_all()

    with tf.variable_scope('statistics'):
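        # non-trainable step counter, also passed as global_step to the optimizer below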
        batchno = tf.Variable(0, dtype=tf.int32, trainable=False, name='batchno')
        batchnoinc = batchno.assign(batchno + 1)
        pass


    optimizer = tf.train.AdamOptimizer(options.LR)
    train_op = optimizer.minimize(loss, global_step=batchno)

    
    config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    saver = tf.train.Saver()

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
github AFAgarap / malware-classification / models / cnn_svm.py
    def bias_variable(shape):
        """Returns a bias matrix consisting of 0.1 values.

        :param shape: The shape of the bias matrix to create.
        :return: The bias matrix consisting of 0.1 values.
        """
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
github leuchine / S-LSTM / sequence_tagging / model / ner_model.py
                Wii = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Wii")
                Wdi = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Wdi")
            #weights for the output gate
            with tf.name_scope("o_gate"):
                Wxo = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Wxo")
                Who = tf.Variable(tf.random_normal([2*hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Who")
                Wio = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Wio")
                Wdo = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Wdo")
            #bias for the gates    
            with tf.name_scope("biases"):
                bi = tf.Variable(tf.random_normal([hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="bi")
                bo = tf.Variable(tf.random_normal([hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="bo")
                bf1 = tf.Variable(tf.random_normal([hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="bf1")
                bf2 = tf.Variable(tf.random_normal([hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="bf2")
                bf3 = tf.Variable(tf.random_normal([hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="bf3")
                bf4 = tf.Variable(tf.random_normal([hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="bf4")

            #dummy node gated attention parameters
            #input gate for dummy state
            with tf.name_scope("gated_d_gate"):
                gated_Wxd = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Wxf")
                gated_Whd = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Whf")
            #output gate
            with tf.name_scope("gated_o_gate"):
                gated_Wxo = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Wxo")
                gated_Who = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Who")
            #forget gate for states of word
            with tf.name_scope("gated_f_gate"):
                gated_Wxf = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Wxo")
                gated_Whf = tf.Variable(tf.random_normal([hidden_size, hidden_size], mean=0.0, stddev=0.1, dtype=tf.float32), dtype=tf.float32, name="Who")
            #biases
            with tf.name_scope("gated_biases"):
github darksigma / Fundamentals-of-Deep-Learning-Book / archive / seq2seq / tmp_seq2seq_model.py
        the model construction is independent of batch_size, so it can be
        changed after initialization if this is convenient, e.g., for decoding.
      learning_rate: learning rate to start with.
      learning_rate_decay_factor: decay learning rate by this much when needed.
      use_lstm: if true, we use LSTM cells instead of GRU cells.
      num_samples: number of samples for sampled softmax.
      forward_only: if set, we do not construct the backward pass in the model.
    """
    self.source_vocab_size = source_vocab_size
    self.target_vocab_size = target_vocab_size
    self.buckets = buckets
    self.batch_size = batch_size
    self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)

    # If we use sampled softmax, we need an output projection.
    output_projection = None
    softmax_loss_function = None
    # Sampled softmax only makes sense if we sample less than vocabulary size.
    if num_samples > 0 and num_samples < self.target_vocab_size:
      with tf.device("/cpu:0"):
        w = tf.get_variable("proj_w", [size, self.target_vocab_size])
        w_t = tf.transpose(w)
        b = tf.get_variable("proj_b", [self.target_vocab_size])
      output_projection = (w, b)

      def sampled_loss(inputs, labels):
        with tf.device("/cpu:0"):
          labels = tf.reshape(labels, [-1, 1])
          return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
github neuroailab / tnn / bypass_unrolled.py
def _weights(shape): # weights for convolution
    # shape = [spatial, spatial, num_input_channels, num_output_channels]
    # initialized with truncated normal distribution
    return tf.Variable(tf.truncated_normal(shape, stddev=WEIGHT_STDDEV), name='weights')
def _bias(shape): # bias variable for convolution
github wubinzzu / NeuRec / model / sequential_recommender / Fossil.py
    def _create_variables(self):
        with tf.name_scope("embedding"):  # The embedding initialization is unknown now
            initializer = tool.get_initializer(self.init_method, self.stddev)
            
            self.c1 = tf.Variable(initializer([self.num_items, self.embedding_size]), name='c1', dtype=tf.float32)
            self.c2 = tf.constant(0.0, tf.float32, [1, self.embedding_size], name='c2')
            self.embedding_P = tf.concat([self.c1, self.c2], 0, name='embedding_P')
            self.embedding_Q = tf.Variable(initializer([self.num_items, self.embedding_size]),
                                           name='embedding_Q', dtype=tf.float32)
            
            self.eta = tf.Variable(initializer([self.num_users, self.high_order]), name='eta')
            self.eta_bias = tf.Variable(initializer([1, self.high_order]), name='eta_bias')
            
            self.bias = tf.Variable(tf.zeros(self.num_items), name='bias')
github dtransposed / Reinforcement-Learning-With-Unity-G.E.A.R / ml-agents / __backup / trainers / bc / models.py
    def __init__(self, brain, h_size=128, lr=1e-4, n_layers=2, m_size=128,
                 normalize=False, use_recurrent=False, scope='PPO', seed=0):
        with tf.variable_scope(scope):
            LearningModel.__init__(self, m_size, normalize, use_recurrent, brain, seed)
            num_streams = 1
            hidden_streams = self.create_observation_streams(num_streams, h_size, n_layers)
            hidden = hidden_streams[0]
            self.dropout_rate = tf.placeholder(dtype=tf.float32, shape=[], name="dropout_rate")
            hidden_reg = tf.layers.dropout(hidden, self.dropout_rate)
            if self.use_recurrent:
                tf.Variable(self.m_size, name="memory_size", trainable=False, dtype=tf.int32)
                self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32, name='recurrent_in')
                hidden_reg, self.memory_out = self.create_recurrent_encoder(hidden_reg, self.memory_in,
                                                                            self.sequence_length)
                self.memory_out = tf.identity(self.memory_out, name='recurrent_out')

            if brain.vector_action_space_type == "discrete":
                policy_branches = []
                for size in self.act_size:
                    policy_branches.append(
                        tf.layers.dense(
                            hidden,
                            size,
                            activation=None,
                            use_bias=False,
                            kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01)))
                self.action_probs = tf.concat(
github Stanford-STAGES / stanford-stages / inf_convolution.py
def batch_norm(x, n_out, av_dims, is_training, scope='bn'):
    """
    Batch normalization on convolutional maps.
    Args:
        x:           Tensor, 4D BHWD input maps
        n_out:       integer, depth of input maps
        av_dims:     list of ints, axes over which batch moments are computed
        is_training: boolean, true indicates training phase
        scope:       string, variable scope
    Return:
        normed:      batch-normalized maps
    """
    with tf.variable_scope(scope):
        beta = tf.Variable(tf.constant(0.0, shape=[n_out]),
                           name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),
                            name='gamma', trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, av_dims, name='moments')

        ema = tf.train.ExponentialMovingAverage(decay=0.99)
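        # moving averages of the batch moments; ema.apply(...) in mean_var_with_update refreshes them during training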

    # phase_train = tf.get_variable('is_training',[],dtype=bool,trainable=False,initializer=tf.constant_initializer(True))
    phase_train = tf.constant(True, dtype=bool, name='is_training')
    if not is_training:
        phase_train = tf.logical_not(phase_train, name='is_not_training')

    # phase_train = tf.Print(phase_train,[phase_train])
    def mean_var_with_update():
        ema_apply_op = ema.apply([batch_mean, batch_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)