How to use the tflearn.variables.variable function in tflearn

To help you get started, we’ve selected a few tflearn.variables.variable examples, drawn from the ways the function is commonly used in public projects.

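Before working through the examples, here is a minimal sketch of the function's basic usage. The name, shape, and initializer below are illustrative, not taken from any of the projects:

import tensorflow as tf
import tflearn

# Create a trainable weight matrix. Setting restore=False would exclude
# it when restoring model weights from a checkpoint.
W = tflearn.variables.variable(
    name='W',
    shape=[128, 64],
    initializer=tf.random_normal_initializer(stddev=0.1),
    trainable=True,
    restore=True)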

github Shen-Lab / DeepAffinity / Joint_models / joint_attention / joint_fixed_RNN / joint-Model.py
# Pairwise attention bias over (protein, compound) positions; restore=False
# keeps it out of checkpoint restoration.
b = tflearn.variables.variable(
    name="Attn_b_prot",
    shape=[protein_MAX_size, comp_MAX_size],
    initializer=tf.random_normal([protein_MAX_size, comp_MAX_size], stddev=0.1),
    restore=False)
V = tf.tensordot(prot_gru_2, W, axes=[[2], [0]])
for i in range(batch_size):
    # Bilinear attention scores for sample i: [protein_MAX_size, comp_MAX_size]
    V_i = tflearn.reshape(tf.slice(V, [i, 0, 0], [1, protein_MAX_size, GRU_size_drug]),
                          [protein_MAX_size, GRU_size_drug])
    drug_i = tflearn.reshape(tf.slice(drug_gru_2, [i, 0, 0], [1, comp_MAX_size, GRU_size_drug]),
                             [comp_MAX_size, GRU_size_drug])
    temp = tf.expand_dims(tf.tanh(tf.tensordot(V_i, drug_i, axes=[[1], [1]]) + b), 0)
    if i == 0:
        VU = temp
    else:
        VU = merge([VU, temp], mode='concat', axis=0)

VU = tflearn.reshape(VU, [-1, comp_MAX_size * protein_MAX_size])
alphas_pair = tf.nn.softmax(VU, name='alphas')
alphas_pair = tflearn.reshape(alphas_pair, [-1, protein_MAX_size, comp_MAX_size])

U_size = 256
U_prot = tflearn.variables.variable(
    name="Attn_U_prot", shape=[U_size, GRU_size_prot],
    initializer=tf.random_normal([U_size, GRU_size_prot], stddev=0.1), restore=False)
U_drug = tflearn.variables.variable(
    name="Attn_U_drug", shape=[U_size, GRU_size_drug],
    initializer=tf.random_normal([U_size, GRU_size_drug], stddev=0.1), restore=False)
B = tflearn.variables.variable(
    name="Attn_B", shape=[U_size],
    initializer=tf.random_normal([U_size], stddev=0.1), restore=False)

prod_drug = tf.tensordot(drug_gru_2, U_drug, axes=[[2], [1]])
prod_prot = tf.tensordot(prot_gru_2, U_prot, axes=[[2], [1]])

Attn = tflearn.variables.variable(name="Attn", shape=[batch_size, U_size],
                                  initializer=tf.zeros([batch_size, U_size]), restore=False)
for i in range(comp_MAX_size):
    # Attention-weighted sum over the protein axis for drug position i.
    temp = tf.expand_dims(tflearn.reshape(tf.slice(prod_drug, [0, i, 0],
                          [batch_size, 1, U_size]), [batch_size, U_size]), axis=1) + prod_prot + B
    alpha_i = tf.tile(tflearn.reshape(tf.slice(alphas_pair, [0, 0, i],
                      [batch_size, protein_MAX_size, 1]),
                      [batch_size, protein_MAX_size, 1]), [1, 1, U_size])
    Attn = Attn + tf.reduce_sum(tf.multiply(alpha_i, temp), axis=1)


Attn_reshape = tflearn.reshape(Attn, [-1, U_size, 1])
conv_1 = conv_1d(Attn_reshape, 64, 4, 2, activation='leakyrelu',
                 weights_init="xavier", regularizer="L2", name='conv1')
pool_1 = max_pool_1d(conv_1, 4, name='pool1')
#conv_2 = conv_1d(pool_1, 64, 4,2, activation='leakyrelu', weights_init="xavier",regularizer="L2",name='conv2')
#pool_2 = max_pool_1d(conv_2, 4,name='pool2')
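The per-sample loop above can also be expressed in one shot. A sketch of an equivalent vectorized form using tf.einsum (assuming a TensorFlow version that provides it), with V of shape [batch_size, protein_MAX_size, GRU_size_drug] and drug_gru_2 of shape [batch_size, comp_MAX_size, GRU_size_drug] as in the snippet:

# VU[i, p, c] = tanh(sum_k V[i, p, k] * drug_gru_2[i, c, k] + b[p, c])
VU = tf.tanh(tf.einsum('ipk,ick->ipc', V, drug_gru_2) + b)
VU = tflearn.reshape(VU, [-1, comp_MAX_size * protein_MAX_size])

This avoids the per-sample tf.slice/merge round trip while producing the same [batch, protein, compound] score tensor.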
github TensorMSA / tensormsa_old / tflearn / layers / normalization.py
        Batch Normalization: Accelerating Deep Network Training by Reducing
        Internal Covariate Shift. Sergey Ioffe, Christian Szegedy. 2015.

    Links:
        [http://arxiv.org/pdf/1502.03167v3.pdf](http://arxiv.org/pdf/1502.03167v3.pdf)

    """

    input_shape = utils.get_incoming_shape(incoming)
    input_ndim = len(input_shape)

    gamma_init = tf.random_normal_initializer(mean=gamma, stddev=stddev)

    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name
        beta = vs.variable('beta', shape=[input_shape[-1]],
                           initializer=tf.constant_initializer(beta),
                           trainable=trainable, restore=restore)
        gamma = vs.variable('gamma', shape=[input_shape[-1]],
                            initializer=gamma_init, trainable=trainable,
                            restore=restore)
        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, beta)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, gamma)
        if not restore:
            tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, beta)
            tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, gamma)

        axis = list(range(input_ndim - 1))
        moving_mean = vs.variable('moving_mean',
                                  input_shape[-1:],
                                  initializer=tf.zeros_initializer,
                                  trainable=False, restore=restore)
github tflearn / tflearn / tflearn / layers / normalization.py
[http://arxiv.org/pdf/1502.03167v3.pdf](http://arxiv.org/pdf/1502.03167v3.pdf)

    """

    input_shape = utils.get_incoming_shape(incoming)
    input_ndim = len(input_shape)

    gamma_init = tf.random_normal_initializer(mean=gamma, stddev=stddev)

    with tf.variable_scope(scope, default_name=name, values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name
        beta = vs.variable('beta', shape=[input_shape[-1]],
                           initializer=tf.constant_initializer(beta),
                           trainable=trainable, restore=restore)
        gamma = vs.variable('gamma', shape=[input_shape[-1]],
                            initializer=gamma_init, trainable=trainable,
                            restore=restore)
        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, beta)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, gamma)
        if not restore:
            tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, beta)
            tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, gamma)

        axis = list(range(input_ndim - 1))

        moving_mean = vs.variable('moving_mean', input_shape[-1:],
                                  initializer=tf.zeros_initializer(),
                                  trainable=False, restore=restore)
        moving_variance = vs.variable('moving_variance',
                                      input_shape[-1:],
                                      initializer=tf.constant_initializer(1.),
                                      trainable=False,
                                      restore=restore)
github TensorMSA / tensormsa_old / tflearn / layers / conv.py
W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)

        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                            trainable=trainable, restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = tf.nn.conv2d(incoming, W, strides, padding)
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)
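Because each layer registers its variables under tf.GraphKeys.LAYER_VARIABLES + '/' + name, they can be fetched back by layer name later. A minimal sketch; the layer name 'conv1' is illustrative:

import tensorflow as tf

# Retrieve the W and b variables registered by the layer named 'conv1'.
conv1_vars = tf.get_collection(tf.GraphKeys.LAYER_VARIABLES + '/conv1')

tflearn also exposes a helper for this lookup (get_layer_variables_by_name in tflearn.variables).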
github TensorMSA / tensormsa_old / tflearn / layers / conv.py
filter_size = utils.autoformat_filter_conv2d(filter_size,
                                                 input_shape[-1],
                                                 nb_filter)
    strides = utils.autoformat_kernel_2d(strides)
    padding = utils.autoformat_padding(padding)

    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)

        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                            trainable=trainable, restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = tf.nn.conv2d(incoming, W, strides, padding)
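The autoformat helpers at the top of this snippet normalize scalar arguments into the formats tf.nn.conv2d expects. A sketch of their typical behavior; the concrete values are illustrative:

from tflearn import utils

# An int filter size expands to a 4-D filter shape [h, w, in_ch, out_ch].
filter_size = utils.autoformat_filter_conv2d(3, 16, 32)  # -> [3, 3, 16, 32]
# An int stride expands to the 4-D strides list [1, s, s, 1].
strides = utils.autoformat_kernel_2d(2)                  # -> [1, 2, 2, 1]
# Padding strings are validated and uppercased.
padding = utils.autoformat_padding('same')               # -> 'SAME'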
github TensorMSA / tensormsa_old / tflearn / layers / core.py
restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        if isinstance(bias_init, str):
            bias_init = initializations.get(bias_init)()
        b = va.variable('b', shape=[n_units], initializer=bias_init,
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        # Weight and bias for the transform gate
        W_T = va.variable('W_T', shape=[n_inputs, n_units],
                          regularizer=None, initializer=W_init,
                          trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W_T)

        b_T = va.variable('b_T', shape=[n_units],
                          initializer=tf.constant_initializer(-1),
                          trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b_T)

        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            incoming = tf.reshape(incoming, [-1, n_inputs])

        if isinstance(activation, str):
            activation = activations.get(activation)
        elif hasattr(activation, '__call__'):
            activation = activation
        else:
            raise ValueError("Invalid Activation.")

        H = activation(tf.matmul(incoming, W) + b)
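The excerpt stops at the activation H. In a highway layer, the transform gate T built from W_T and b_T then blends H with the (flattened) input; a sketch of the usual combination, assuming incoming is already 2-D:

# Transform gate: how much of H versus the raw input passes through.
T = tf.sigmoid(tf.matmul(incoming, W_T) + b_T)
# Carry gate is (1 - T).
inference = H * T + incoming * (1.0 - T)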
github tflearn / tflearn / tflearn / layers / conv.py
W_regul = lambda x: regularizers.get(regularizer)(x, weight_decay)
        W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)

        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            b_shape = [nb_filter]
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            elif type(bias_init) in [tf.Tensor, np.ndarray, list]:
                b_shape = None
            b = vs.variable('b', shape=b_shape, initializer=bias_init,
                            trainable=trainable, restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = tf.nn.conv2d(incoming, W, strides, padding)
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if activation:
            if isinstance(activation, str):
                inference = activations.get(activation)(inference)
            elif hasattr(activation, '__call__'):
                inference = activation(inference)
            else:
                raise ValueError("Invalid Activation.")

        # Track activations.
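Note the b_shape = None branch above: when bias_init is already a tf.Tensor, ndarray, or list, no explicit shape is passed and the variable's shape is inferred from the value. A minimal sketch of that calling pattern; the input placeholder and filter counts are illustrative:

import numpy as np
import tensorflow as tf
from tflearn.layers.conv import conv_2d

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
# bias_init as a concrete array: the bias variable takes its shape
# from the value instead of from an explicit [nb_filter] shape.
net = conv_2d(x, 32, 3, bias_init=np.zeros(32, dtype=np.float32))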
github tflearn / tflearn / tflearn / layers / normalization.py
beta = vs.variable('beta', shape=[input_shape[-1]],
                           initializer=tf.constant_initializer(beta),
                           trainable=trainable, restore=restore)
        gamma = vs.variable('gamma', shape=[input_shape[-1]],
                            initializer=gamma_init, trainable=trainable,
                            restore=restore)
        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, beta)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, gamma)
        if not restore:
            tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, beta)
            tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, gamma)

        axis = list(range(input_ndim - 1))

        moving_mean = vs.variable('moving_mean', input_shape[-1:],
                                  initializer=tf.zeros_initializer(),
                                  trainable=False, restore=restore)
        moving_variance = vs.variable('moving_variance',
                                      input_shape[-1:],
                                      initializer=tf.constant_initializer(1.),
                                      trainable=False,
                                      restore=restore)

        # Define a function to update mean and variance
        def update_mean_var():
            mean, variance = tf.nn.moments(incoming, axis)

            update_moving_mean = moving_averages.assign_moving_average(
                moving_mean, mean, decay, zero_debias=False)
            update_moving_variance = moving_averages.assign_moving_average(
                moving_variance, variance, decay, zero_debias=False)
            with tf.control_dependencies(
                    [update_moving_mean, update_moving_variance]):
                return tf.identity(mean), tf.identity(variance)
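A function like update_mean_var is typically selected with tf.cond, so the moving statistics are updated only in training mode and reused at inference. A sketch of that pattern; is_training and epsilon stand in for the layer's mode flag and epsilon argument, which the excerpt does not show:

# Training: compute batch moments and update the moving averages.
# Inference: use the stored moving averages directly.
mean, var = tf.cond(is_training,
                    update_mean_var,
                    lambda: (moving_mean, moving_variance))
inference = tf.nn.batch_normalization(incoming, mean, var,
                                      beta, gamma, epsilon)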
github tflearn / tflearn / tflearn / layers / conv.py
strides = utils.autoformat_kernel_2d(strides)
    padding = utils.autoformat_padding(padding)

    with tf.variable_scope(scope, default_name=name, values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        elif type(W_init) in [tf.Tensor, np.ndarray, list]:
            filter_size = None
        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: regularizers.get(regularizer)(x, weight_decay)
        W = vs.variable('W', shape=filter_size,
                        regularizer=W_regul, initializer=W_init,
                        trainable=trainable, restore=restore)
        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            b_shape = [nb_filter]
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            elif type(bias_init) in [tf.Tensor, np.ndarray, list]:
                b_shape = None
            b = vs.variable('b', shape=b_shape, initializer=bias_init,
                            trainable=trainable, restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
github tflearn / tflearn / tflearn / layers / conv.py
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        bias_init = initializations.get(bias_init)()
        b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                        trainable=trainable, restore=restore)
        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        # Weight and bias for the transform gate
        W_T = vs.variable('W_T', shape=nb_filter,
                          regularizer=None, initializer=W_init,
                          trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' +
                             name, W_T)

        b_T = vs.variable('b_T', shape=nb_filter,
                          initializer=tf.constant_initializer(-3),
                          trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' +
                             name, b_T)

        if isinstance(activation, str):
            activation = activations.get(activation)
        elif hasattr(activation, '__call__'):
            activation = activation
        else:
            raise ValueError("Invalid Activation.")

        # Shared convolution for gating
        convolved = tf.nn.conv2d(incoming, W, strides, padding)
        H = activation(convolved + b)
        T = tf.sigmoid(tf.multiply(convolved, W_T) + b_T)
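As in the fully connected highway layer, H and the gate T are then blended with a carry gate; a sketch of the usual final step, assuming incoming has the same channel count as nb_filter so the shapes line up:

# Carry gate: whatever T does not transform is carried straight through.
C = tf.subtract(1.0, T)
inference = tf.add(tf.multiply(H, T), tf.multiply(incoming, C))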