How to use the tensorflow.placeholder function in TensorFlow

To help you get started, we’ve selected a few tensorflow.placeholder examples, based on popular ways the function is used in public projects.

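All of the snippets below follow the same pattern: declare a placeholder with a dtype and an (often partially unknown) shape, build graph ops on top of it, then supply concrete values at run time through feed_dict. Here is a minimal, self-contained sketch of that pattern, written against the TF 1.x API (in TensorFlow 2.x the same calls live under tf.compat.v1, with eager execution disabled):

import tensorflow as tf

# A placeholder holds no value; it is a typed slot filled in at session run time.
x = tf.placeholder(tf.float32, shape=[None, 3], name="x")  # None leaves the batch size open
y = tf.reduce_sum(x, axis=1)                               # a graph op consuming the placeholder

with tf.Session() as sess:
    # feed_dict maps each placeholder to a concrete value of compatible shape.
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]}))  # [ 6. 15.]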

github kellywzhang / reading-comprehension / testing / testing_reader2.py
vocab_size = 50000
embedding_dim = 8
batch_size = 2
state_size = 11
input_size = 8

# Starting interactive Session
sess = tf.InteractiveSession()

# Placeholders
# can add assert statements to ensure shared None dimensions are equal (batch_size)
seq_lens_d = tf.placeholder(tf.int32, [None, ], name="seq_lens_d")
seq_lens_q = tf.placeholder(tf.int32, [None, ], name="seq_lens_q")
input_d = tf.placeholder(tf.int32, [None, None], name="input_d")
input_q = tf.placeholder(tf.int32, [None, None], name="input_q")
input_a = tf.placeholder(tf.int32, [None, ], name="input_a")
input_m = tf.placeholder(tf.int32, [None, ], name="input_m")
n_steps = tf.placeholder(tf.int32)

# toy feed dict
feed = {
    n_steps: 5,
    seq_lens_d: [5,4],
    seq_lens_q: [2,3],
    input_d: [[20,30,40,50,60],[2,3,4,5,0]], # document
    input_q: [[2,3,0],[1,2,3]],              # query
    input_a: [1,0],                          # answer
    input_m: [2,3],                          # number of entities
}

mask_d = tf.cast(tf.sequence_mask(seq_lens_d), tf.int32)
mask_q = tf.cast(tf.sequence_mask(seq_lens_q), tf.int32)
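
The snippet stops after building the masks; with the InteractiveSession above, evaluating them against the toy feed dict would look like this (a sketch of the obvious next step, not part of the original file):

# Tensor.eval() picks up the active InteractiveSession implicitly.
print(mask_d.eval(feed_dict=feed))  # [[1 1 1 1 1] [1 1 1 1 0]] from seq_lens_d = [5, 4]
print(mask_q.eval(feed_dict=feed))  # [[1 1 0] [1 1 1]]         from seq_lens_q = [2, 3]
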
github peace195 / multitask-learning-protein-prediction / multitask-learning / multitask-8states / lstm_test_ss_only.py
    plt.text(j, i, format(cm[i, j], fmt),
             horizontalalignment="center",
             color="white" if cm[i, j] > thresh else "black")

  plt.tight_layout()
  plt.ylabel('True label')
  plt.xlabel('Predicted label')

# Modeling
graph = tf.Graph()
with graph.as_default():
  tf_X = tf.placeholder(tf.int64, shape=[None, seq_max_len])
  tf_y = tf.placeholder(tf.int64, shape=[None, seq_max_len])
  tf_rel_label = tf.placeholder(tf.int64, shape=[None, seq_max_len])
  tf_b_label = tf.placeholder(tf.int64, shape=[None, seq_max_len])
  tf_word_embeddings = tf.placeholder(tf.float32, shape=[vocabulary_size, embedding_size])
  tf_X_binary_mask = tf.placeholder(tf.float32, shape=[None, seq_max_len])
  tf_weight_mask = tf.placeholder(tf.float32, shape=[None, seq_max_len])
  tf_weight_mask_ss = tf.placeholder(tf.float32, shape=[None, seq_max_len])
  tf_seq_len = tf.placeholder(tf.int64, shape=[None, ])
  keep_prob = tf.placeholder(tf.float32)
  
  ln_w = tf.Variable(tf.truncated_normal([embedding_size, nb_linear_inside], stddev=1.0 / math.sqrt(embedding_size)))
  ln_b = tf.Variable(tf.zeros([nb_linear_inside]))
  sent_w = tf.Variable(tf.truncated_normal([nb_lstm_inside, 8],
                       stddev=1.0 / math.sqrt(2 * nb_lstm_inside)))
  sent_b = tf.Variable(tf.zeros([8]))

  rel_w = tf.Variable(tf.truncated_normal([nb_lstm_inside, nb_label],
                       stddev=1.0 / math.sqrt(2 * nb_lstm_inside)))
  rel_b = tf.Variable(tf.zeros([nb_label]))
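
Note the scalar keep_prob placeholder above: declared with no shape, it lets one graph run with dropout during training (keep_prob < 1.0) and without it at evaluation time (keep_prob = 1.0). A standalone sketch of that idiom, separate from the snippet:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])
keep_prob = tf.placeholder(tf.float32)             # scalar: no shape argument needed
dropped = tf.nn.dropout(x, keep_prob)

with tf.Session() as sess:
    batch = [[1.0, 2.0, 3.0, 4.0]]
    sess.run(dropped, {x: batch, keep_prob: 0.5})  # training: dropout active
    sess.run(dropped, {x: batch, keep_prob: 1.0})  # evaluation: dropout disabled
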
github wenxichen / tensorflow_yolo2 / src / slim_dir / datasets / download_and_convert_cifar10.py
    offset: An offset into the absolute number of images previously written.

  Returns:
    The new offset.
  """
  with tf.gfile.Open(filename, 'rb') as f:  # pickled data must be opened in binary mode
    data = cPickle.load(f)

  images = data['data']
  num_images = images.shape[0]

  images = images.reshape((num_images, 3, 32, 32))
  labels = data['labels']

  with tf.Graph().as_default():
    image_placeholder = tf.placeholder(dtype=tf.uint8)
    encoded_image = tf.image.encode_png(image_placeholder)

    with tf.Session('') as sess:

      for j in range(num_images):
        sys.stdout.write('\r>> Reading file [%s] image %d/%d' % (
            filename, offset + j + 1, offset + num_images))
        sys.stdout.flush()

        image = np.squeeze(images[j]).transpose((1, 2, 0))
        label = labels[j]

        png_string = sess.run(encoded_image,
                              feed_dict={image_placeholder: image})

        example = dataset_utils.image_to_tfexample(
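
Although the snippet is truncated, the pattern it demonstrates, a placeholder declared with a dtype but no shape at all feeding tf.image.encode_png, is worth isolating. A minimal runnable sketch (the array size is illustrative):

import numpy as np
import tensorflow as tf

image_placeholder = tf.placeholder(dtype=tf.uint8)   # dtype only: any shape may be fed
encoded_image = tf.image.encode_png(image_placeholder)

with tf.Session() as sess:
    img = np.zeros((32, 32, 3), dtype=np.uint8)      # HWC uint8 image, as encode_png expects
    png_bytes = sess.run(encoded_image, feed_dict={image_placeholder: img})
    print(len(png_bytes))                            # length of the serialized PNG
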
github conan7882 / GoogLeNet-Inception / src / nets / googlenet.py
def _create_train_input(self):
        self.image = tf.placeholder(
            tf.float32, [None, None, None, self._n_channel], name='image')
        self.label = tf.placeholder(tf.int64, [None], 'label')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        self.lr = tf.placeholder(tf.float32, name='lr')
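
Leaving both spatial dimensions as None, as _create_train_input does for self.image, lets one graph accept images of any height and width, typically paired with an in-graph resize. A standalone illustration (the resize target and input shape are made up for the example):

import numpy as np
import tensorflow as tf

image = tf.placeholder(tf.float32, [None, None, None, 3], name='image')
resized = tf.image.resize_images(image, [224, 224])  # pins the spatial dims downstream

with tf.Session() as sess:
    batch = np.zeros((2, 300, 400, 3), np.float32)   # any H and W are accepted
    print(sess.run(resized, {image: batch}).shape)   # (2, 224, 224, 3)
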
github JenifferWuUCLA / pulmonary-nodules-MaskRCNN / pulmonary-nodules-Demos / classical-CNN / train_model_using_own_dataset / 05 / src / cnn.py
def __init__(self, data, classifies, learning_rate, **kw):
        self._data = data
        self._type_number = self._data.type_number
        self._image_size = self._data.image_size
        self._image_channel = self._data.image_channel
        self._batch_size = self._data.batch_size
        self._classifies = classifies

        input_shape = [self._batch_size, self._image_size, self._image_size, self._image_channel]
        self._images = tf.placeholder(shape=input_shape, dtype=tf.float32)
        self._labels = tf.placeholder(dtype=tf.int32, shape=[self._batch_size])

        self._logits, self._softmax, self._prediction = classifies(self._images, **kw)
        self._entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self._labels, logits=self._logits)
        self._loss = tf.reduce_mean(self._entropy)
        self._solver = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5).minimize(self._loss)

        self._saver = tf.train.Saver()
        self._sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
        pass
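
Unlike the earlier snippets, this class pins the batch dimension to self._batch_size instead of leaving it None, so every sess.run call must feed exactly that many samples. A small standalone contrast of the two choices:

import numpy as np
import tensorflow as tf

x_fixed = tf.placeholder(tf.float32, shape=[4, 2])    # batch dimension pinned to 4
x_open = tf.placeholder(tf.float32, shape=[None, 2])  # batch dimension left open

with tf.Session() as sess:
    sess.run(tf.identity(x_open), {x_open: np.zeros((7, 2), np.float32)})    # any batch size works
    sess.run(tf.identity(x_fixed), {x_fixed: np.zeros((4, 2), np.float32)})  # must be exactly 4 rows
    # feeding np.zeros((7, 2)) to x_fixed would raise a ValueError at run time
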
github CoNLL-UD-2018 / UDPipe-Future / ud_parser2.py
def construct(self, args, num_words, num_chars, num_tags):
        with self.session.graph.as_default():
            # Inputs
            self.sentence_lens = tf.placeholder(tf.int32, [None])
            self.word_ids = tf.placeholder(tf.int32, [None, None])
            self.charseqs = tf.placeholder(tf.int32, [None, None])
            self.charseq_lens = tf.placeholder(tf.int32, [None])
            self.charseq_ids = tf.placeholder(tf.int32, [None, None])
            self.tags = dict((tag, tf.placeholder(tf.int32, [None, None])) for tag in args.tags)
            self.heads = tf.placeholder(tf.int32, [None, None])
            self.is_training = tf.placeholder(tf.bool, [])
            self.learning_rate = tf.placeholder(tf.float32, [])

            # RNN Cell
            if args.rnn_cell == "LSTM":
                rnn_cell = tf.nn.rnn_cell.BasicLSTMCell
            elif args.rnn_cell == "GRU":
                rnn_cell = tf.nn.rnn_cell.GRUCell
            else:
                raise ValueError("Unknown rnn_cell {}".format(args.rnn_cell))

            # Word embeddings
            inputs = 0
            if args.we_dim:
github siddhantjain / PointCloudAnnotationTool / src / Python / src / FineTune.py
def placeholder_inputs():
    pointclouds_ph = tf.placeholder(tf.float32, shape=(batch_size, point_num, 3))
    input_label_ph = tf.placeholder(tf.float32, shape=(batch_size, NUM_CATEGORIES))
    labels_ph = tf.placeholder(tf.int32, shape=(batch_size,))  # note the trailing comma: shape is a 1-D tuple
    seg_ph = tf.placeholder(tf.int32, shape=(batch_size, point_num))
    pairwise_distances_ph = tf.placeholder(tf.float32, shape=(batch_size, point_num*point_num))
    return pointclouds_ph, input_label_ph, labels_ph, seg_ph, pairwise_distances_ph
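
A call site for this helper would unpack the tuple and key one feed dict by each placeholder. A hypothetical sketch, assuming the module-level batch_size, point_num, and NUM_CATEGORIES that the function itself relies on (zero arrays stand in for real batches):

import numpy as np

pc_ph, cat_ph, lbl_ph, seg_ph, dist_ph = placeholder_inputs()
feed_dict = {
    pc_ph: np.zeros((batch_size, point_num, 3), np.float32),             # xyz coordinates
    cat_ph: np.zeros((batch_size, NUM_CATEGORIES), np.float32),          # one-hot object category
    lbl_ph: np.zeros((batch_size,), np.int32),                           # category ids
    seg_ph: np.zeros((batch_size, point_num), np.int32),                 # per-point part labels
    dist_ph: np.zeros((batch_size, point_num * point_num), np.float32),  # flattened pairwise distances
}
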
github davidslac / mlearntut / ex04_tf_train.py
    training_X, training_Y = readData(train_files, 'xtcavimg', 'lasing', 'tf', numOutputs)
    validation_X, validation_Y = readData(validation_files, 'xtcavimg', 'lasing', 'tf', numOutputs)
    read_time = time.time()-t0
    minibatch_size = 24
    batches_per_epoch = len(training_X)//minibatch_size
    print("-- read %d samples in %.2fsec. batch_size=%d, %d batches per epoch" %
          (len(training_X)+len(validation_X), read_time, minibatch_size, batches_per_epoch))
    sys.stdout.flush()

    VALIDATION_SIZE = 80
    shuffle_data(validation_X, validation_Y)
    validation_X = validation_X[0:VALIDATION_SIZE]
    validation_Y = validation_Y[0:VALIDATION_SIZE]

    # EXPLAIN: placeholders
    img_placeholder = tf.placeholder(tf.int16,
                                     shape=(None,363,284,1),
                                     name='img')
    labels_placeholder = tf.placeholder(tf.float32, 
                                        shape=(None, numOutputs),
                                        name='labels')
    model = build_model(img_placeholder, numOutputs=2)    

    ## loss
    cross_entropy_loss_all = tf.nn.softmax_cross_entropy_with_logits(logits=model.final_logits,
                                                                     labels=labels_placeholder)
    cross_entropy_loss = tf.reduce_mean(cross_entropy_loss_all)

    ## training
    global_step = tf.Variable(0, trainable=False)
    lr = 0.002
    learning_rate = tf.train.exponential_decay(learning_rate=lr,
github chrischute / squad-transformer / code / main.py
        test_examples, test_answers, test_info, _, _ = preprocess(test_data)

        # Get formatted examples in memory for creating a TF Dataset
        formatted_examples, output_types, output_shapes = get_formatted_examples(FLAGS, test_examples, word2id, char2id)

        # Construct a generator function for building TF dataset
        def gen():
            infinite_idx = 0
            while True:
                yield formatted_examples[infinite_idx]
                infinite_idx = (infinite_idx + 1) % len(formatted_examples)

        # Initialize data pipeline (repeat so we can use this multiple times in an ensemble).
        test_dataset = tf.data.Dataset.from_generator(gen, output_types, output_shapes).repeat().batch(FLAGS.batch_size)
        test_iterator = test_dataset.make_one_shot_iterator()
        input_handle = tf.placeholder(tf.string, shape=())
        input_iterator = tf.data.Iterator.from_string_handle(input_handle, test_dataset.output_types, test_dataset.output_shapes)

        # Ensemble or single eval.
        is_ensemble = FLAGS.ensemble_path != ""
        if is_ensemble:  # Path to file with a list of directories for ensemble
            with open(FLAGS.ensemble_path, 'r') as fh:
                checkpoint_paths = [line.strip() for line in fh.readlines() if line]
                if len(checkpoint_paths) == 0:
                    raise Exception("Ensemble path {} did not contain any checkpoint paths.".format(FLAGS.ensemble_path))
        else:
            checkpoint_paths = [FLAGS.checkpoint_dir]

        # Make predictions using all checkpoints specified in checkpoint_paths
        model = SQuADTransformer(FLAGS, input_iterator, input_handle, word_emb_matrix, char_emb_matrix)
        all_answers = defaultdict(list)  # Maps from UUID to list of (answer text, prob) pairs.
        for i in range(len(checkpoint_paths)):