How to use the tensorflow.expand_dims function in TensorFlow

To help you get started, we’ve selected a few TensorFlow examples that show popular ways tf.expand_dims is used in public projects.
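In short, tf.expand_dims(input, axis) returns a tensor with a length-one dimension inserted at the given axis, without copying the underlying data. A minimal sketch (all values illustrative) before the project snippets below:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])      # shape (3,)
print(tf.expand_dims(x, 0).shape)     # (1, 3): new leading axis
print(tf.expand_dims(x, 1).shape)     # (3, 1): new trailing axis
print(tf.expand_dims(x, -1).shape)    # (3, 1): negative axes count from the end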

github JerrikEph / SentenceOrdering_PTR / model.py
        encoder_input = tf.nn.embedding_lookup(self.embedding, self.ph_encoder_input) #(batch_size, tstps_en, len_sentence, embed_size)
        if self.config.sent_rep == 'lstm':
            encoder_input, eos, sos = lstm_sentence_rep(encoder_input)  # (b_sz, tstp_en, emb_sz)
        elif self.config.sent_rep == 'cnn':
            encoder_input, eos, sos = cnn_sentence_rep(encoder_input)  # (b_sz, tstp_en, emb_sz)
        elif self.config.sent_rep == 'cbow':
            encoder_input, eos, sos = cbow_sentence_rep(encoder_input)  # (b_sz, tstp_en, emb_sz)
        else:
            raise ValueError('sent_rep: ' + self.config.sent_rep)

        emb_sz = tf.shape(encoder_input)[2]
        self.sos = sos
        
        dummy_1 = tf.expand_dims(encoder_input, 1) #(b_sz, 1, tstps_en, emb_sz)
        
        encoder_input_tile = tf.tile(dummy_1, [1, tstps_de, 1, 1])       #(b_sz, tstps_de, tstps_en, emb_sz)
        
        dummy_decoder_label = tf.select(self.ph_decoder_label >= 0, self.ph_decoder_label, tf.zeros_like(self.ph_decoder_label))
        mask = tf.one_hot(dummy_decoder_label, depth=tstps_en, on_value=True, off_value=False) #(b_sz, tstps_de, tstps_en)
        decoder_input = tf.boolean_mask(encoder_input_tile, mask) #(b_sz*tstps_de, emb_sz)
        decoder_input = tf.reshape(decoder_input, shape=(b_sz, tstps_de, emb_sz), name='fetch_input_reshape_0') #(b_sz, tstps_de, emb_sz)
        
        encoder_input = tf.concat(concat_dim=1, values=[eos, encoder_input]) #(b_sz, tstps_en+1, emb_sz)
        decoder_input = tf.concat(concat_dim=1, values=[sos, decoder_input]) #(b_sz, tstps_de+1, emb_sz)
    
        self.encoder_tstps = self.ph_input_encoder_len + 1
        self.decoder_tstps = self.ph_input_decoder_len + 1
        dummy_1 = tf.reshape(tf.ones(shape=(b_sz, 1), dtype=tf.int32)* tf.constant(-1), shape=(b_sz, 1), name='fetch_input_reshape_1') #b_sz, 1
        decoder_label = tf.concat(concat_dim=1, values=[self.ph_decoder_label, dummy_1]) + 1               #b_sz, tstps_de+1
        self.decoder_label = tf.sequence_mask(self.decoder_tstps,
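
The snippet above targets an older TensorFlow API (tf.select, concat_dim), but its core move is unchanged in current releases: insert a singleton decoder-time axis, then tile so every decoder step sees the full encoder sequence. A sketch with illustrative sizes:

import tensorflow as tf

b_sz, tstps_en, tstps_de, emb_sz = 2, 5, 4, 8      # illustrative sizes
encoder_input = tf.random.normal([b_sz, tstps_en, emb_sz])

dummy_1 = tf.expand_dims(encoder_input, 1)         # (b_sz, 1, tstps_en, emb_sz)
tiled = tf.tile(dummy_1, [1, tstps_de, 1, 1])      # (b_sz, tstps_de, tstps_en, emb_sz)
print(tiled.shape)                                 # (2, 4, 5, 8)
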
github Tom-Ryder / VIforSDEs / lotka-volterra / VI_for_SDEs.py
    def _path_sampler(self, inp, mu_nn, sigma_nn):
        '''
        sample new state using learned Gaussian state transitions
        :param inp: current state of system
        :param mu_nn: drift vector from RNN
        :param sigma_nn: diffusion matrix from RNN as cholesky factor
        '''
        out_dist = tfd.TransformedDistribution(distribution=tfd.MultivariateNormalTriL(
            loc=inp + self.dt * tf.squeeze(mu_nn), scale_tril=tf.sqrt(self.dt) * sigma_nn), bijector=tfb.Softplus(event_ndims=1))
        out = tf.expand_dims(out_dist.sample(), 1)
        return out
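
After sampling, tf.expand_dims(..., 1) gives each draw a singleton time axis so successive states can be concatenated along it. A sketch with a simpler diagonal Gaussian standing in for the transformed distribution above (tensorflow_probability assumed installed):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
dist = tfd.MultivariateNormalDiag(loc=tf.zeros([3, 2]))  # batch of 3 states, event size 2
sample = dist.sample()                                   # (3, 2)
out = tf.expand_dims(sample, 1)                          # (3, 1, 2): singleton time axis
print(out.shape)
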
github carpedm20 / SPIRAL-tensorflow / models / policy.py
                action_one_hot, action = \
                        categorical_sample(logit, np.prod(action_size))

                # [batch, max_time, action_size[name]]
                one_hot_samples[name] = tf.reshape(
                        action_one_hot, [batch_size, max_time, -1],
                        name="one_hot_samples_{}".format(name))
                # [batch, max_time, 1]
                samples[name] = tf.reshape(
                        action, [batch_size, max_time],
                        name="samples_{}".format(name))

                if action_idx < len(action_sizes) - 1:
                    # this will be fed to make gradients flow
                    out = mlp(
                            tf.expand_dims(samples[name], -1), int(16),
                            name='sample_mlp')
                    # [batch, max_time, lstm_size]
                    z = tl.dense(
                            tf.concat([z, out], -1), int(lstm_size),
                            activation=tf.nn.relu,
                            name="concat_z_fc")

        return one_hot_samples, samples, logits
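
Here tf.expand_dims(samples[name], -1) gives the per-step scalar samples a trailing feature axis so they can pass through a dense layer. A sketch, with tf.keras.layers.Dense standing in for the project's mlp helper and made-up action values:

import tensorflow as tf

samples = tf.constant([[3., 1., 4.], [1., 5., 9.]])  # [batch=2, max_time=3] sampled actions
inp = tf.expand_dims(samples, -1)                    # (2, 3, 1): trailing feature axis
out = tf.keras.layers.Dense(16)(inp)                 # (2, 3, 16)
print(out.shape)
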
github zhongbin1 / DeepMatching / models / DSA.py
def get_masked_weights(inputs, seq_len, max_len):
    seq_mask = tf.sequence_mask(seq_len, max_len, dtype=tf.float32)  # [batch_size, max_len]
    seq_mask = tf.expand_dims(seq_mask, 1)  # [batch_size, 1, max_len]
    outputs = inputs * seq_mask + (seq_mask - 1) * 1e9
    outputs = tf.nn.softmax(outputs, axis=-1)
    return outputs
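
tf.expand_dims inserts a query axis so the [batch_size, max_len] mask broadcasts against rank-3 attention scores; (seq_mask - 1) * 1e9 drives padded positions toward -1e9 before the softmax. A runnable sketch with illustrative shapes:

import tensorflow as tf

scores = tf.random.normal([2, 3, 4])                                   # [batch, queries, max_len]
seq_mask = tf.sequence_mask(tf.constant([2, 4]), 4, dtype=tf.float32)  # [batch, max_len]
seq_mask = tf.expand_dims(seq_mask, 1)                                 # [batch, 1, max_len]: broadcasts over queries
masked = scores * seq_mask + (seq_mask - 1) * 1e9                      # padded slots pushed to -1e9
weights = tf.nn.softmax(masked, axis=-1)
print(weights[0, 0])                                                   # last two weights ~0 for the length-2 row
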
github apeterswu / RL4NMT / tensor2tensor / utils / model_builder.py
        if model_class._num_datashards == 1:  # work on single GPU cards, fast sample
            print("###Work on Single GPU card, Use Fast Decode.###")
            train_beam = getattr(hparams, 'train_beam', None)
            if mrt_samples:
                samples, _ = model_class._fast_decode(features, decode_length=50,
                                                      beam_size=mrt_samples, top_beams=mrt_samples)
                inputs = tf.squeeze(tf.squeeze(features["inputs"], axis=-1), axis=-1)
                targets = tf.squeeze(tf.squeeze(features["targets"], axis=-1), axis=-1)
                batch_size = tf.shape(inputs)[0]
                inputs_len = tf.shape(inputs)[1]
                targets_len = tf.shape(targets)[1]
                inputs_tile = tf.tile(inputs, [1, mrt_samples])
                targets_tile = tf.tile(targets, [1, mrt_samples])
                inputs_reshape = tf.reshape(inputs_tile, [batch_size*mrt_samples, inputs_len])
                targets_reshape = tf.reshape(targets_tile, [batch_size*mrt_samples, targets_len])
                inputs_feed = tf.expand_dims(tf.expand_dims(inputs_reshape, axis=-1), axis=-1)
                targets_feed = tf.expand_dims(tf.expand_dims(targets_reshape, axis=-1), axis=-1)
                features["inputs"] = inputs_feed
                features["targets"] = targets_feed
            elif train_beam and train_beam != 1:  # beam search with hparams.train_beam size and return the top 1 sample
                samples, _ = model_class._fast_decode(features, decode_length=50, beam_size=hparams.train_beam)
            else:
                targets_beam = getattr(hparams, 'targets_beam', None)
                if targets_beam:
                    targets_samples, _ = model_class._fast_decode(features, decode_length=50,
                                                                  beam_size=4, sampling_method='argmax')
                    targets_samples = tf.reshape(targets_samples, [tf.shape(targets_samples)[0], tf.shape(targets_samples)[1], 1, 1])
                    features["targets"] = targets_samples
                samples, _ = model_class._fast_decode(features, decode_length=50)
            samples = tf.expand_dims(samples, axis=-1)
            samples = tf.expand_dims(samples, axis=-1)  # add two additional dimensions to make it compatible.
        else:  # work on multi GPU cards, only support slow sample
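
The two stacked expand_dims calls restore the 4-D [batch, length, 1, 1] layout that tensor2tensor expects for features, mirroring the tf.squeeze calls earlier in the snippet. A sketch with illustrative token ids:

import tensorflow as tf

samples = tf.constant([[7, 8, 9], [4, 5, 6]])  # [batch, length] token ids
samples = tf.expand_dims(samples, axis=-1)     # [batch, length, 1]
samples = tf.expand_dims(samples, axis=-1)     # [batch, length, 1, 1]
print(samples.shape)                           # (2, 3, 1, 1)
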
github ecobost / cnn4brca / code / old / logistic.py
def model(image):
	""" A simple linear model.
	
	Args:
		image: A 3D tensor. The input image

	Returns:
		A 2D tensor. The predicted segmentation: a logit heatmap.
	"""
	batch = tf.expand_dims(image, 0)	# Batch with a single image
	
	# Logistic regression
	with tf.name_scope('logistic') as scope:
		# Create filter and bias
		filter = tf.Variable(tf.random_uniform([112, 112, 1, 1], -1.0, 1.0),
							 name='weights')
		bias = tf.Variable(tf.zeros([1]), name='bias')
		
		# Add weights to the weights collection (for regularization)
		tf.add_to_collection(tf.GraphKeys.WEIGHTS, filter)
		
		# Perform 2-d convolution
		w_times_x = tf.nn.conv2d(batch, filter, [1, 1, 1, 1], padding='SAME')
		output = w_times_x + bias
	
		# Summarize activations
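
tf.nn.conv2d requires an NHWC batch, so a single HWC image gets a leading batch axis via tf.expand_dims(image, 0). A standalone sketch of the same shapes (random data in place of a real image and learned filter):

import tensorflow as tf

image = tf.random.normal([112, 112, 1])              # one grayscale image, HWC
batch = tf.expand_dims(image, 0)                     # (1, 112, 112, 1): NHWC batch of one
filt = tf.random.normal([112, 112, 1, 1])            # illustrative filter, shaped as above
out = tf.nn.conv2d(batch, filt, strides=[1, 1, 1, 1], padding='SAME')
print(out.shape)                                     # (1, 112, 112, 1)
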
github apache / tika / tika-parsers / src / main / resources / org / apache / tika / parser / captioning / tf / model_wrapper.py
def build_inputs(self):
        """Input prefetching, preprocessing and batching"""

        image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed")
        input_feed = tf.placeholder(dtype=tf.int64,
                                    shape=[None],  # batch_size
                                    name="input_feed")

        # process image and insert batch dimensions
        images = tf.expand_dims(self.process_image(image_feed), 0)
        input_seqs = tf.expand_dims(input_feed, 1)

        # no target sequences or input mask in inference mode
        target_seqs = None
        input_mask = None

        self.images = images
        self.input_seqs = input_seqs
        self.target_seqs = target_seqs
        self.input_mask = input_mask
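
The two calls above pick different axes on purpose: axis 0 turns one processed image into a batch of one, while axis 1 turns a flat vector of word ids into length-one sequences. A sketch with illustrative values:

import tensorflow as tf

image = tf.random.normal([224, 224, 3])             # illustrative decoded image
images = tf.expand_dims(image, 0)                   # (1, 224, 224, 3): batch of one
input_feed = tf.constant([42, 7], dtype=tf.int64)   # flat word ids
input_seqs = tf.expand_dims(input_feed, 1)          # (2, 1): one-step sequences
print(images.shape, input_seqs.shape)
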
github luheng / lsgn / model_utils.py
def batch_gather(emb, indices):
  # TODO: Merge with util.batch_gather.
  """
  Args:
    emb: Shape of [num_sentences, max_sentence_length, (emb)]
    indices: Shape of [num_sentences, k, (l)]
  """
  num_sentences = tf.shape(emb)[0] 
  max_sentence_length = tf.shape(emb)[1] 
  flattened_emb = flatten_emb(emb)  # [num_sentences * max_sentence_length, emb]
  offset = tf.expand_dims(tf.range(num_sentences) * max_sentence_length, 1)  # [num_sentences, 1]
  if len(indices.get_shape()) == 3:
    offset = tf.expand_dims(offset, 2)  # [num_sentences, 1, 1]
  return tf.gather(flattened_emb, indices + offset)
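
tf.expand_dims reshapes the per-sentence offsets into a column so they broadcast against the [num_sentences, k] index matrix when added. A toy run with scalar "embeddings":

import tensorflow as tf

emb = tf.reshape(tf.range(6), [2, 3])        # 2 sentences, 3 tokens each: [[0, 1, 2], [3, 4, 5]]
indices = tf.constant([[0, 2], [1, 2]])      # [num_sentences, k]
flattened_emb = tf.reshape(emb, [-1])        # [0, 1, 2, 3, 4, 5]
offset = tf.expand_dims(tf.range(2) * 3, 1)  # [[0], [3]]: column vector broadcasts over k
print(tf.gather(flattened_emb, indices + offset))  # [[0, 2], [4, 5]]
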
github HyeonwooNoh / VQA-Transfer-ExternalData / vlmap_crop_and_run / model.py
        # V2L [bs, L_DIM]
        map_L, V2L_hidden = modules.V2L(feat_V, MAP_DIM, L_DIM, scope='V2L',
                                        is_train=is_train, reuse=tf.AUTO_REUSE)
        # Language inputs
        seq_len = self.batches['region']['region_description_len']
        wordset_seq = self.batches['region']['wordset_region_description']
        blank_seq = self.batches['region']['blank_description']
        # enc_L: "enc" in language enc-dec [bs, L_DIM]
        embed_blank_seq = tf.nn.embedding_lookup(self.glove_wordset, blank_seq)
        enc_L = modules.language_encoder(
            embed_blank_seq, seq_len + 1, L_DIM,
            scope='language_encoder', reuse=tf.AUTO_REUSE)
        # dec_L: "dec" in language enc-dec + description [2*bs, L_DIM]
        start_tokens = tf.zeros([tf.shape(wordset_seq)[0]], dtype=tf.int32) + \
            self.wordset_vocab['dict']['<s>']
        seq_with_start = tf.concat([tf.expand_dims(start_tokens, axis=1),
                                    wordset_seq[:, :-1]], axis=1)
        embed_seq_with_start = tf.nn.embedding_lookup(self.glove_wordset,
                                                      seq_with_start)
        in_L = feat_V + enc_L
        if self.use_embed_transform: decoder_dim = L_DIM
        else: decoder_dim = W_DIM
        logits, pred, pred_len = modules.language_decoder(
            in_L, embed_seq_with_start,
            seq_len + 1,  # seq_len + 1 is for <s>
            lambda e: tf.nn.embedding_lookup(self.glove_wordset, e),
            decoder_dim, start_tokens, self.wordset_vocab['dict']['<e>'],
            self.region_max_len + 1,  # + 1 for <e>
            unroll_type='teacher_forcing', output_layer=self.word_predictor,
            is_train=is_train, scope='language_decoder', reuse=tf.AUTO_REUSE)
        _, greedy_pred, greedy_pred_len = modules.language_decoder(
            in_L, embed_seq_with_start,
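
Here tf.expand_dims(start_tokens, axis=1) turns the batch of <s> ids into a [bs, 1] column so it can be concatenated in front of the right-shifted target sequence for teacher forcing. A sketch with an illustrative start id:

import tensorflow as tf

wordset_seq = tf.constant([[5, 6, 7], [8, 9, 10]])  # [bs, T] token ids
start_id = 0                                        # illustrative <s> id
start_tokens = tf.zeros([tf.shape(wordset_seq)[0]], dtype=tf.int32) + start_id
seq_with_start = tf.concat([tf.expand_dims(start_tokens, axis=1),
                            wordset_seq[:, :-1]], axis=1)  # shift right, prepend <s>
print(seq_with_start)                               # [[0, 5, 6], [0, 8, 9]]
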
github PolyAI-LDN / conversational-datasets / baselines / vector_based.py
            self._input_ids = tf.placeholder(
                name="input_ids", shape=[None, None], dtype=tf.int32)
            self._input_mask = tf.placeholder(
                name="input_mask", shape=[None, None], dtype=tf.int32)
            self._segment_ids = tf.zeros_like(self._input_ids)
            bert_inputs = dict(
                input_ids=self._input_ids,
                input_mask=self._input_mask,
                segment_ids=self._segment_ids
            )

            embeddings = embed_fn(
                inputs=bert_inputs, signature="tokens", as_dict=True)[
                "sequence_output"
            ]
            mask = tf.expand_dims(
                tf.cast(self._input_mask, dtype=tf.float32), -1)
            self._embeddings = tf.reduce_sum(mask * embeddings, axis=1)

            init_ops = (
                tf.global_variables_initializer(), tf.tables_initializer())
        glog.info("Initializing graph.")
        self._session.run(init_ops)
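
The trailing singleton axis lets the [batch, seq_len] token mask broadcast against the [batch, seq_len, hidden] BERT outputs, zeroing padded positions before sum pooling. A standalone sketch with illustrative shapes:

import tensorflow as tf

embeddings = tf.random.normal([2, 4, 8])                    # [batch, seq_len, hidden]
input_mask = tf.constant([[1, 1, 0, 0], [1, 1, 1, 1]])      # 1 marks real tokens
mask = tf.expand_dims(tf.cast(input_mask, tf.float32), -1)  # (2, 4, 1): broadcasts over hidden
pooled = tf.reduce_sum(mask * embeddings, axis=1)           # (2, 8): padded steps contribute zero
print(pooled.shape)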