How to use tensorflow - 10 common examples

To help you get started, we’ve selected a few tensorflow examples based on popular ways it is used in public projects.

github sharpstill / AU_R-CNN / test_feature / RAM_tf / ram.py
            r = int(minRadius * (2 ** i))  # current radius

            d_raw = 2 * r
            d = tf.constant(d_raw, shape=[1])
            d = tf.tile(d, [2])  # replicate d twice; used as the (height, width) slice size
            loc_k = loc[k, :]  # k is the batch index
            # each image was resized up to the biggest radius (one_img2), so offset + loc_k - r is the adjusted crop location
            adjusted_loc = offset + loc_k - r  # 2 * max_radius + loc_k - current_radius
            one_img2 = tf.reshape(one_img, (one_img.get_shape()[0].value, one_img.get_shape()[1].value))

            # crop image to (d x d)
            zoom = tf.slice(one_img2, adjusted_loc, d)  # slice starts at adjusted_loc

            # resize the cropped image to (sensorBandwidth x sensorBandwidth)
            # note that sensorBandwidth is the side length of the smallest zoom (finest granularity)
            zoom = tf.image.resize_bilinear(tf.reshape(zoom, (1, d_raw, d_raw, 1)), (sensorBandwidth, sensorBandwidth))
            zoom = tf.reshape(zoom, (sensorBandwidth, sensorBandwidth))
            imgZooms.append(zoom)

        zooms.append(tf.stack(imgZooms))

    zooms = tf.stack(zooms)

    glimpse_images.append(zooms)

    return zooms
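
The glimpse extraction above boils down to a crop with tf.slice followed by a bilinear resize. A minimal self-contained sketch of the same pattern (the 8x8 image and all sizes here are made up, not the repo's values):

import numpy as np
import tensorflow as tf

# Crop a 4x4 window starting at (row=2, col=2) out of an 8x8 image,
# then resize the crop to 2x2, as the snippet does per glimpse.
img = tf.constant(np.arange(64, dtype=np.float32).reshape(8, 8))
crop = tf.slice(img, [2, 2], [4, 4])
resized = tf.image.resize_bilinear(tf.reshape(crop, (1, 4, 4, 1)), (2, 2))
resized = tf.reshape(resized, (2, 2))

with tf.Session() as sess:
    print(sess.run(resized))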
github kellywzhang / reading-comprehension / testing / testing_reader2.py
vocab_size = 50000
embedding_dim = 8
batch_size = 2
state_size = 11
input_size = 8

# Starting interactive Session
sess = tf.InteractiveSession()

# Placeholders
# can add assert statements to ensure shared None dimensions are equal (batch_size)
seq_lens_d = tf.placeholder(tf.int32, [None, ], name="seq_lens_d")
seq_lens_q = tf.placeholder(tf.int32, [None, ], name="seq_lens_q")
input_d = tf.placeholder(tf.int32, [None, None], name="input_d")
input_q = tf.placeholder(tf.int32, [None, None], name="input_q")
input_a = tf.placeholder(tf.int32, [None, ], name="input_a")
input_m = tf.placeholder(tf.int32, [None, ], name="input_m")
n_steps = tf.placeholder(tf.int32)

# toy feed dict
feed = {
    n_steps: 5,
    seq_lens_d: [5,4],
    seq_lens_q: [2,3],
    input_d: [[20,30,40,50,60],[2,3,4,5,0]], # document
    input_q: [[2,3,0],[1,2,3]],              # query
    input_a: [1,0],                          # answer
    input_m: [2,3],                          # number of entities
}

mask_d = tf.cast(tf.sequence_mask(seq_lens_d), tf.int32)
mask_q = tf.cast(tf.sequence_mask(seq_lens_q), tf.int32)
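
With the toy feed dict above, the masks can be evaluated directly in the InteractiveSession; tf.sequence_mask turns each length into a row of ones padded with zeros:

print(mask_d.eval(feed_dict=feed))
# [[1 1 1 1 1]
#  [1 1 1 1 0]]
print(mask_q.eval(feed_dict=feed))
# [[1 1 0]
#  [1 1 1]]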
github peace195 / multitask-learning-protein-prediction / multitask-learning / multitask-8states / lstm_test_ss_only.py
    plt.text(j, i, format(cm[i, j], fmt),
             horizontalalignment="center",
             color="white" if cm[i, j] > thresh else "black")

  plt.tight_layout()
  plt.ylabel('True label')
  plt.xlabel('Predicted label')

# Modeling
graph = tf.Graph()
with graph.as_default():
  tf_X = tf.placeholder(tf.int64, shape=[None, seq_max_len])
  tf_y = tf.placeholder(tf.int64, shape=[None, seq_max_len])
  tf_rel_label = tf.placeholder(tf.int64, shape=[None, seq_max_len])
  tf_b_label = tf.placeholder(tf.int64, shape=[None, seq_max_len])
  tf_word_embeddings = tf.placeholder(tf.float32, shape=[vocabulary_size, embedding_size])
  tf_X_binary_mask = tf.placeholder(tf.float32, shape=[None, seq_max_len])
  tf_weight_mask = tf.placeholder(tf.float32, shape=[None, seq_max_len])
  tf_weight_mask_ss = tf.placeholder(tf.float32, shape=[None, seq_max_len])
  tf_seq_len = tf.placeholder(tf.int64, shape=[None, ])
  keep_prob = tf.placeholder(tf.float32)
  
  ln_w = tf.Variable(tf.truncated_normal([embedding_size, nb_linear_inside], stddev=1.0 / math.sqrt(embedding_size)))
  ln_b = tf.Variable(tf.zeros([nb_linear_inside]))
  sent_w = tf.Variable(tf.truncated_normal([nb_lstm_inside, 8],
                       stddev=1.0 / math.sqrt(2 * nb_lstm_inside)))
  sent_b = tf.Variable(tf.zeros([8]))

  rel_w = tf.Variable(tf.truncated_normal([nb_lstm_inside, nb_label],
                       stddev=1.0 / math.sqrt(2 * nb_lstm_inside)))
  rel_b = tf.Variable(tf.zeros([nb_label]))
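
The keep_prob placeholder is the standard TF1 dropout switch: feed a value below 1.0 during training and 1.0 at evaluation. A minimal sketch of that pattern (names here are illustrative, not from the repo):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 4])
keep_prob = tf.placeholder(tf.float32)
dropped = tf.nn.dropout(x, keep_prob=keep_prob)

with tf.Session() as sess:
    batch = [[1.0, 2.0, 3.0, 4.0]]
    # Training: each unit is kept with probability 0.5, survivors scaled by 1/keep_prob.
    print(sess.run(dropped, {x: batch, keep_prob: 0.5}))
    # Evaluation: keep_prob=1.0 passes the input through unchanged.
    print(sess.run(dropped, {x: batch, keep_prob: 1.0}))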
github TianzhongSong / Tensorflow-quantization-test / utils / layers.py
def denselayer(x, w, b, weight_scale=0., activation=''):
    # Quantize the activations; `quantize` (defined elsewhere in the repo)
    # returns the quantized tensor and its scale factor.
    x, sx = quantize(x)
    x = tf.cast(x, dtype=tf.float32)
    x = tf.matmul(x, w)       # integer-valued matmul carried out in float32
    s = sx * weight_scale     # combined activation and weight scale
    x = x * s                 # dequantize back to real-valued activations
    x = tf.add(x, b)
    if activation == "relu":
        x = tf.nn.relu(x)
    return x
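
A usage sketch, assuming quantize() returns the quantized tensor together with its scale. The stand-in below is a hypothetical symmetric 8-bit quantizer so the example runs on its own; the repo's real implementation may differ:

import tensorflow as tf

def quantize(x, bits=8):  # hypothetical stand-in for the repo's quantize()
    scale = tf.reduce_max(tf.abs(x)) / (2 ** (bits - 1) - 1)
    return tf.round(x / scale), scale

x = tf.constant([[0.5, -1.0], [2.0, 0.25]])
w = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # identity weights for easy checking
b = tf.constant([0.1, 0.1])

y = denselayer(x, w, b, weight_scale=1.0, activation='relu')
with tf.Session() as sess:
    print(sess.run(y))  # approximately relu(x @ w + b)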
github rlworkgroup / garage / tests / benchmarks / garage / tf / policies / test_benchmark_continuous_mlp_policy.py
            size_in_transitions=params['replay_buffer_size'],
            time_horizon=params['n_rollout_steps'])

        ddpg = DDPG(env_spec=env.spec,
                    policy=policy,
                    qf=qf,
                    replay_buffer=replay_buffer,
                    steps_per_epoch=params['steps_per_epoch'],
                    policy_lr=params['policy_lr'],
                    qf_lr=params['qf_lr'],
                    target_update_tau=params['tau'],
                    n_train_steps=params['n_train_steps'],
                    discount=params['discount'],
                    min_buffer_size=int(1e4),
                    exploration_strategy=action_noise,
                    policy_optimizer=tf.train.AdamOptimizer,
                    qf_optimizer=tf.train.AdamOptimizer)

        # Set up logger since we are not using run_experiment
        tabular_log_file = osp.join(log_dir, 'progress.csv')
        dowel_logger.add_output(dowel.StdOutput())
        dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))
        dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))

        runner.setup(ddpg, env, sampler_args=dict(n_envs=12))
        runner.train(n_epochs=params['n_epochs'],
                     batch_size=params['n_rollout_steps'])

        dowel_logger.remove_all()

        return tabular_log_file
github ddddwee1 / Robomaster_NTU_2018 / parctice / old_files / Armour_plate_detection_training / Robot_detection_v2 / veri_original_image / testing / graph.py
def build_graph(test=False):
	with tf.name_scope('imgholder'): # A placeholder is just a holder; it doesn't contain the actual data.
		imgholder = tf.placeholder(tf.float32,[None,256,256,3]) # 3 color channels
	with tf.name_scope('bias_holder'):
		bias_holder = tf.placeholder(tf.float32,[None,16,16,4]) # The bias (x,y,w,h) for the 16*16 feature maps.
	with tf.name_scope('conf_holder'):
		conf_holder = tf.placeholder(tf.float32,[None,16,16,1]) # The confidence for the 16*16 feature maps.
	with tf.name_scope('croppedholder'):
		croppedholder = tf.placeholder(tf.float32,[None,32,32,3]) # 32x32 crops, 3 color channels
	with tf.name_scope('veri_conf_holder2'):
		veri_conf_holder = tf.placeholder(tf.float32, [None,1])
#	with tf.name_scope('veri_bias_holder'):
#		veri_bias_holder = tf.placeholder(tf.float32, [None,4]) # The verifier bias output: x,y,w,h

	with tf.name_scope('mask'):
		maskholder = tf.placeholder(tf.float32,[None,16,16,1])

	conf, bias, feature_map = RPN(imgholder, test)
	veri_conf = verify_net(croppedholder, test)

	bias_loss = tf.reduce_sum(tf.reduce_mean(tf.square(bias*conf_holder - bias_holder), axis=0))
	conf_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=conf, labels=conf_holder))

#	veri_bias_loss = tf.reduce_sum(tf.reduce_mean(tf.square(veri_bias*veri_conf_holder - veri_bias_holder),axis=0))
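
The confidence loss relies on tf.nn.sigmoid_cross_entropy_with_logits, which applies the sigmoid internally in a numerically stable form. A quick self-contained check of what it computes:

import tensorflow as tf

logits = tf.constant([[2.0, -1.0]])
labels = tf.constant([[1.0, 0.0]])

loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
# The naive equivalent: -y*log(p) - (1-y)*log(1-p) with p = sigmoid(logits)
p = tf.sigmoid(logits)
manual = -labels * tf.log(p) - (1 - labels) * tf.log(1 - p)

with tf.Session() as sess:
    print(sess.run(loss))    # ~[[0.1269 0.3133]]
    print(sess.run(manual))  # same values, but this form can overflow for large logits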
github kmkolasinski / deep-learning-notes / seminars / normalizing-flows / test_flow_layers.py
def try_to_train_identity_layer(
        self,
        layer: fl.FlowLayer,
        flow: fl.FlowData,
        feed_dict_fn: Optional[Callable[[], Dict[tf.Tensor, np.ndarray]]] = None,
        sess: Optional[tf.Session] = None,
        post_init_fn: Optional[Callable[[tf.Session], None]] = None,
    ):
        x, logdet, z = flow
        new_flow = layer(flow, forward=True, is_training=True)
        x_rec, logdet_rec, z_rec = new_flow
        loss = tf.losses.mean_squared_error(x, x_rec)
        opt = tf.train.MomentumOptimizer(0.1, 0.9)
        opt_op = opt.minimize(loss)

        sess = tf.Session() if sess is None else sess
        sess.run(tf.global_variables_initializer())
        if post_init_fn is not None:
            post_init_fn(sess)
        losses = []
        for i in range(50):
            if feed_dict_fn is not None:
                feed_dict = feed_dict_fn()
            else:
                feed_dict = None
            loss_np, _ = sess.run([loss, opt_op], feed_dict=feed_dict)
            losses.append(loss_np)

        self.assertGreater(losses[0], losses[-1])
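
The assertion at the end only demands that training moved the loss downward. Here is the same loop stripped to a self-contained toy, fitting a single variable with MomentumOptimizer:

import tensorflow as tf

x = tf.Variable([5.0])
loss = tf.losses.mean_squared_error(labels=tf.zeros([1]), predictions=x)
opt_op = tf.train.MomentumOptimizer(0.1, 0.9).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    losses = [sess.run([loss, opt_op])[0] for _ in range(50)]
    assert losses[0] > losses[-1]  # the loss decreased over training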
github Simon4Yan / Learning-via-Translation / SPGAN / test_spgan.py
""" param """
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset', dest='dataset', default='market2duke', help='which dataset to use')
parser.add_argument('--crop_size', dest='crop_size', type=int, default=256, help='then crop to this size')
args = parser.parse_args()

dataset = args.dataset
crop_size = args.crop_size


""" run """
with tf.Session() as sess:
    a_real = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 3])
    b_real = tf.placeholder(tf.float32, shape=[None, crop_size, crop_size, 3])

    a2b = models.generator(a_real, 'a2b')
    b2a = models.generator(b_real, 'b2a')
    b2a2b = models.generator(b2a, 'a2b', reuse=True)
    a2b2a = models.generator(a2b, 'b2a', reuse=True)

    #--restore--#
    saver = tf.train.Saver()
    ckpt_path = utils.load_checkpoint('./checkpoints/' + dataset + '_spgan', sess, saver)

    if ckpt_path is None:
        raise Exception('No checkpoint!')
    else:
        saver.restore(sess, ckpt_path)
        print('Copy variables from %s' % ckpt_path)
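
utils.load_checkpoint here is repo-specific; with plain TensorFlow the same lookup-then-restore would look roughly like this (tf.train.latest_checkpoint returns None when no checkpoint is found):

import tensorflow as tf

v = tf.Variable(0.0, name='v')  # a Saver needs at least one variable in the graph
saver = tf.train.Saver()

with tf.Session() as sess:
    ckpt_path = tf.train.latest_checkpoint('./checkpoints/market2duke_spgan')
    if ckpt_path is None:
        raise Exception('No checkpoint!')
    saver.restore(sess, ckpt_path)
    print('Copy variables from %s' % ckpt_path)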
github peace195 / multitask-learning-protein-prediction / multitask-learning / multitask-3states / lstm.py
  structure_ss = tf.multiply(structure_ss, tf.expand_dims(tf_X_binary_mask, 2))

  structure_rel = tf.split(axis=0, num_or_size_splits=seq_max_len, value=structure_rel)
  # Change back dimension to [batch_size, n_step, n_input]
  structure_rel = tf.stack(structure_rel)
  structure_rel = tf.transpose(structure_rel, [1, 0, 2])
  structure_rel = tf.multiply(structure_rel, tf.expand_dims(tf_X_binary_mask, 2))

  structure_b = tf.split(axis=0, num_or_size_splits=seq_max_len, value=structure_b)
  # Change back dimension to [batch_size, n_step, n_input]
  structure_b = tf.stack(structure_b)
  structure_b = tf.transpose(structure_b, [1, 0, 2])
  structure_b = tf.multiply(structure_b, tf.expand_dims(tf_X_binary_mask, 2))

  cross_entropy_ss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=structure_ss, labels=y_labels))
  cross_entropy_rel = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=structure_rel, labels=rel_label))
  cross_entropy_b = tf.reduce_mean(tf.multiply(tf.nn.softmax_cross_entropy_with_logits(logits=structure_b, labels=b_label), tf_weight_mask))

  regularization = WEIGHT_DECAY * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not ("noreg" in tf_var.name or "Bias" in tf_var.name))

  prediction_ss = tf.argmax(tf.nn.softmax(structure_ss), 2)
  correct_prediction_ss = tf.reduce_sum(tf.multiply(tf.cast(tf.equal(prediction_ss, tf_y), tf.float32), tf_X_binary_mask))

  prediction_rel = tf.argmax(tf.nn.softmax(structure_rel), 2)
  correct_prediction_rel = tf.reduce_sum(tf.multiply(tf.cast(tf.equal(prediction_rel, tf_rel_label), tf.float32), tf_X_binary_mask))

  prediction_b = tf.argmax(tf.nn.softmax(structure_b), 2)
  correct_prediction_b = tf.reduce_sum(tf.multiply(tf.cast(tf.equal(prediction_b, tf_b_label), tf.float32), tf_X_binary_mask))

  optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy_ss + cross_entropy_rel + cross_entropy_b + regularization)
  saver = tf.train.Saver()
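
Each correct_prediction_* line counts matches only at real (unpadded) positions by multiplying the elementwise equality with the binary mask. The pattern in isolation:

import tensorflow as tf

predictions = tf.constant([[1, 2, 0], [3, 3, 3]], dtype=tf.int64)
targets     = tf.constant([[1, 2, 9], [3, 0, 3]], dtype=tf.int64)
mask        = tf.constant([[1.0, 1.0, 0.0], [1.0, 1.0, 1.0]])  # 0.0 marks padding

matches = tf.cast(tf.equal(predictions, targets), tf.float32)
correct = tf.reduce_sum(tf.multiply(matches, mask))

with tf.Session() as sess:
    print(sess.run(correct))  # 4.0; the padded position [0, 2] is ignored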
github BII-wushuang / Lie-Group-Motion-Prediction / src / models.py
    else:
        seq_length_out = config.test_output_window

    # Inputs
    enc_in = tf.transpose(input, [1, 0, 2])
    dec_in = tf.transpose(dec_in, [1, 0, 2])

    enc_in = tf.reshape(enc_in, [-1, config.input_size])
    dec_in = tf.reshape(dec_in, [-1, config.input_size])

    enc_in = tf.split(enc_in, seq_length_in-1, axis=0)
    dec_in = tf.split(dec_in, seq_length_out, axis=0)

    if config.model == 'ERD':
        # Encoder
        fc = [tf.layers.dense(enc_in[i], 500, activation=tf.nn.relu, reuse=tf.AUTO_REUSE, name="fc") for i in range(config.input_window_size-1)]
        config.hidden_size = 1000
        hidden_size = [config.hidden_size, config.hidden_size]
        number_of_layers = len(hidden_size)

        def lstm_cell(size):
            cell = tf.contrib.rnn.LSTMCell(size)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=config.keep_prob)
            return cell

        enc_cells = [lstm_cell(hidden_size[i]) for i in range(number_of_layers)]
        enc_cell = tf.contrib.rnn.MultiRNNCell(enc_cells)
        output, final_state = tf.contrib.rnn.static_rnn(enc_cell, fc, dtype=tf.float32)
        enc_state = [(final_state[i][0], final_state[i][1]) for i in range(number_of_layers)]

        # Decoder
        dec_cell = [tf.nn.rnn_cell.LSTMCell(hidden_size[i]) for i in range(number_of_layers)]