How to use the tensorflow.Session class in TensorFlow

To help you get started, we’ve selected a few tensorflow.Session examples, based on popular ways it is used in public projects.

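Before diving into the project snippets below, here is a minimal, self-contained sketch of the typical tf.Session workflow under the TensorFlow 1.x graph API: build the graph, open a session, initialize variables, call sess.run, and let a with-block close the session. The tensor names and values are purely illustrative and are not taken from any of the projects below.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x graph API

# Build the graph: a placeholder for inputs, a variable, and an op that uses both.
x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
w = tf.Variable(tf.ones([3, 1]), name="w")
y = tf.matmul(x, w)

# Run the graph inside a session; the context manager closes it automatically.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # variables must be initialized first
    out = sess.run(y, feed_dict={x: np.ones((2, 3))})
    print(out)  # [[3.], [3.]]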

github kmkolasinski / deep-learning-notes / seminars / normalizing-flows / test_flow_layers.py
def try_to_train_identity_layer(
        self,
        layer: fl.FlowLayer,
        flow: fl.FlowData,
        feed_dict_fn: Optional[Callable[[], Dict[tf.Tensor, np.ndarray]]] = None,
        sess: Optional[tf.Session] = None,
        post_init_fn: Optional[Callable[[tf.Session], None]] = None,
    ):
        x, logdet, z = flow
        new_flow = layer(flow, forward=True, is_training=True)
        x_rec, logdet_rec, z_rec = new_flow
        loss = tf.losses.mean_squared_error(x, x_rec)
        opt = tf.train.MomentumOptimizer(0.1, 0.9)
        opt_op = opt.minimize(loss)

        sess = tf.Session() if sess is None else sess
        sess.run(tf.global_variables_initializer())
        if post_init_fn is not None:
            post_init_fn(sess)
        losses = []
        for i in range(50):
            if feed_dict_fn is not None:
                feed_dict = feed_dict_fn()
            else:
                feed_dict = None
            loss_np, _ = sess.run([loss, opt_op], feed_dict=feed_dict)
            losses.append(loss_np)

        self.assertGreater(losses[0], losses[-1])

github zergylord / oneshot / extended_match_net.py
loss = tf.reduce_mean(-correct_prob,0)
tf.scalar_summary('loss',loss)
optim = tf.train.GradientDescentOptimizer(learning_rate)
#optim = tf.train.AdamOptimizer(learning_rate)
grads = optim.compute_gradients(loss)
grad_summaries = [tf.histogram_summary(v.name,g) if g is not None else '' for g,v in grads]
train_step = optim.apply_gradients(grads)
print('created train step!')



'''
    End of the construction of the computational graph. The remaining code runs training steps.
'''

sess = tf.Session()
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter(FLAGS.summary_dir,sess.graph)
sess.run(tf.initialize_all_variables())
print('running now!')
for i in range(int(1e7)):
    mb_x_i,mb_y_i,mb_x_hat,mb_y_hat = get_minibatch()
    feed_dict = {x_hat: mb_x_hat,
                y_hat_ind: mb_y_hat,
                x_i: mb_x_i,
                y_i_ind: mb_y_i}
    _,mb_loss,summary,ans = sess.run([train_step,loss,merged,cos_sim],feed_dict=feed_dict)
    if i % int(1e1) == 0:
        print(i,'loss: ',mb_loss,'time: ',time.time()-cur_time)
        cur_time = time.time()
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

github simoroma / RecoverGANlatentVector / recover_latent_vector.py
print('Model downloaded.')
# Define the optimization problem
fzp = generator(zp)
loss = tf.losses.mean_squared_error(labels=fz, predictions=fzp)

# Decayed gradient descent
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.99
learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                           global_step,
                                           10000, 0.005)
opt = tf.train.GradientDescentOptimizer(learning_rate)
# Optimize on the variable zp
train = opt.minimize(loss, var_list=zp, global_step=global_step)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(200): # Use more iterations (10000)
  # If we know the original latent vector, we can also compute
  # how far the recovered vector is from it
  _, loss_value, zp_val, eta = sess.run((train, loss, zp, learning_rate))
  z_loss = np.sqrt(np.sum(np.square(zp_val - start_zp))/len(zp_val[0]))
  print("%03d) eta=%03f, loss = %f, z_loss = %f" % (i, eta, loss_value, z_loss))
# Save the recovered latent vector
zp_val = sess.run(zp)
np.save(folder + "zp_rec", zp_val)

# Print out the corresponding image out of the recovered
# latent vector
imgs = sess.run(generator(zp))
imgs = (imgs * 255).astype(np.uint8)
Image.fromarray(imgs[0]).save(folder + "foo_rec.png")

github jxwufan / AssociativeRetrieval / LSTM_model.py
def load_validation(self):
    data_reader = utils.DataReader(data_filename="input_seqs_validation", batch_size=16)
    inputs_seqs_batch, outputs_batch = data_reader.read(False, 1)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    self.validation_inputs = []
    self.validation_targets = []
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        self.validation_inputs.append(input_data)
        self.validation_targets.append(targets)
    except tf.errors.OutOfRangeError:
      pass
    finally:
      coord.request_stop()
    coord.join(threads)
    sess.close()

github WolfNiu / AdversarialDialogue / src / main.py
def main(start_epoch):
    (model, graph, saver_seq2seq) = build_model()
    config = gpu_config()

    with tf.Session(graph=graph, config=config) as sess:
        sess.run(tf.global_variables_initializer())
        print("Initialized.")

        restore_ckpt = None
        if start_epoch > -1:
            if force_restore_point != "":
                restore_ckpt = force_restore_point
            else:
                restore_ckpt = f"{ckpt_path}/{model_extra_str}_{start_epoch}"

        if restore_ckpt is not None:
            saver_seq2seq.restore(sess, restore_ckpt)
            print("Restored from", restore_ckpt)

        for i in range(num_epochs):
            if not infer_only: # for getting perplexity of test data, use train branch

github jonasrothfuss / DeepEpisodicMemory / train_model_multi_gpu.py
def start_session(self):
    """Starts a session and initializes all variables. Provides access to session and coordinator"""
    # Start Session and initialize variables
    self.status = True

    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    self.sess = tf.Session()
    self.sess.run(init_op)

    # Start input enqueue threads
    self.coord = tf.train.Coordinator()
    self.threads = tf.train.start_queue_runners(sess=self.sess, coord=self.coord)

github odlgroup / odl / examples / contrib / tensorflow / tensorflow_tomography_cnn_gradient.py
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def var(x):
    return tf.Variable(tf.constant(x, dtype='float32'))


def create_variable(name, shape, stddev=0.01):
    variable = tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)
    return variable


with tf.Session() as sess:

    # Create ODL data structures
    size = 128
    space = odl.uniform_discr([-64, -64], [64, 64], [size, size],
                              dtype='float32')
    geometry = odl.tomo.parallel_beam_geometry(space)
    ray_transform = odl.tomo.RayTransform(space, geometry)


    # Create tensorflow layer from odl operator
    odl_op_layer = odl.as_tensorflow_layer(ray_transform,
                                           'RayTransform')
    odl_op_layer_adjoint = odl.as_tensorflow_layer(ray_transform.adjoint,
                                                   'RayTransformAdjoint')

    n_data = 50

github yingtaoHuo / wakeUp / wakeUp.py
	pygame.mixer.music.play()
	time.sleep(1)
	pygame.mixer.music.stop()

# Build the network
x=tf.placeholder(tf.float32,[None,1640])
y=tf.placeholder(tf.float32,[None,3])
L1=add_layer(x,1640,128,activation_function=tf.nn.tanh)
L2=add_layer(L1,128,32,activation_function=tf.nn.relu)
prediction=add_layer(L2,32,3,activation_function=None)
zuizhong=tf.nn.softmax(prediction)
saver=tf.train.Saver()
# Main program starts here
record()
zhuanhua()
with tf.Session() as sess:
	sess.run(tf.global_variables_initializer())
	saver.restore(sess,"2")
	huan=[]
	for root,dirs,files in os.walk(txt_files):
		for OneFileName in files:
			f=np.loadtxt(txt_files+OneFileName)
			x_data=f
			y_prediction=sess.run(zuizhong,feed_dict={x:x_data})
			confidence=[]
			confidence=zhixin(x_data,y_prediction)
			huanxing=panduan(confidence,0.331,10)
			huan.append(huanxing)
	for i in range(len(huan)):
		if(huan[i]==1):
			wake_up_success()
		else:

github cagladbahadir / LOUPE / scripts / train_ipmi.py
models_dir = '../models/ipmi_test/' # change this to a location to save models
nb_epochs_train = 60
batch_size = 32


###############################################################################
# GPU
###############################################################################

# gpu handling
gpu = '/gpu:' + str(gpu_id)
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
set_session(tf.Session(config=config))


###############################################################################
# Data - FASHION_MNIST for demo, replace with your favorite dataset
###############################################################################

from keras.datasets import fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
xdata = np.pad(x_train, ((0,0), (2,2), (2,2)), 'constant')  # get to 32x32
xdata = xdata[..., np.newaxis]/255
val_data = xdata[0:1,...]
xdata = xdata[1:,...]
vol_size = xdata.shape[1:-1]

# prepare some place_holder k_space (second entry for the output)
# The second loss function doesn't take this into consideration 

github stokesj / EWC / main.py
def main():
    args = parse_args()
    with tf.Session() as sess:
        tuner = HyperparameterTuner(sess=sess, hidden_layers=args.hidden_layers, hidden_units=args.hidden_units,
                                    num_perms=args.num_perms, trials=args.trials, epochs=args.epochs)
        tuner.search()
        print(tuner.best_parameters)
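
All of the examples above target the TensorFlow 1.x (in some cases pre-1.0) graph API. tf.Session was removed from the top-level namespace in TensorFlow 2.x; if you need to run session-style code on a 2.x installation, the usual route is the tf.compat.v1 compatibility module, roughly as sketched below. The tensors here are illustrative only.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # bring back graph-mode execution so sessions apply

x = tf.placeholder(tf.float32, shape=[None, 4])
y = tf.reduce_sum(x, axis=1)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0, 4.0]]}))  # [10.]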