How to use the tensorflow.reshape function in tensorflow

To help you get started, we’ve selected a few tensorflow.reshape examples, based on popular ways it is used in public projects.

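tf.reshape returns a tensor containing the same elements laid out in a new shape: the total element count must be unchanged, and a single -1 dimension is inferred from that count. A minimal sketch of the basics:

import tensorflow as tf

x = tf.range(12)              # shape (12,)
m = tf.reshape(x, (3, 4))     # shape (3, 4); the element count stays 12
f = tf.reshape(m, (-1,))      # one -1 dimension is inferred: back to (12,)
b = tf.reshape(m, (2, 3, 2))  # any rank works as long as 2 * 3 * 2 == 12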

github sharpstill / AU_R-CNN / test_feature / RAM_tf / ram.py
r = int(minRadius * (2 ** i))  # current radius

            d_raw = 2 * r
            d = tf.constant(d_raw, shape=[1])
            d = tf.tile(d, [2])  # replicate d twice to form the (d, d) slice size
            loc_k = loc[k, :]  # k is the batch index
            # each image is first resized to the biggest-radius image (one_img2); offset + loc_k - r gives the adjusted crop location
            adjusted_loc = offset + loc_k - r  # 2 * max_radius + loc_k - current_radius
            one_img2 = tf.reshape(one_img, (one_img.get_shape()[0].value, one_img.get_shape()[1].value))

            # crop image to (d x d)
            zoom = tf.slice(one_img2, adjusted_loc, d)  # the slice starts at adjusted_loc

            # resize cropped image to (sensorBandwidth x sensorBandwidth)
            # note that sensorBandwidth is side length for the smallest zoom (finest granularity)
            zoom = tf.image.resize_bilinear(tf.reshape(zoom, (1, d_raw, d_raw, 1)), (sensorBandwidth, sensorBandwidth))
            zoom = tf.reshape(zoom, (sensorBandwidth, sensorBandwidth))
            imgZooms.append(zoom)

        zooms.append(tf.stack(imgZooms))

    zooms = tf.stack(zooms)

    glimpse_images.append(zooms)

    return zooms
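A common reason for these reshapes: the tf.image resize ops expect a rank-4 NHWC tensor, so the 2-D crop goes to (1, d, d, 1) for resizing and back to 2-D afterwards. A minimal sketch of that round trip (hypothetical sizes; the snippet uses the TF1 tf.image.resize_bilinear, the TF2 equivalent is tf.image.resize):

import tensorflow as tf

crop = tf.ones((16, 16))                     # a (d, d) crop from the image
batched = tf.reshape(crop, (1, 16, 16, 1))   # rank-4 NHWC, as the resize op expects
resized = tf.image.resize(batched, (8, 8))   # (1, 8, 8, 1)
glimpse = tf.reshape(resized, (8, 8))        # back to a 2-D glimpse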
github allenai / document-qa / experimental / paragraph_selection / paragraph_selection_with_context.py
with tf.variable_scope("encode_questions"):
                q_encoded = self.encode_question_words.apply(is_train, q_embed, q_mask)
        else:
            q_encoded = q_embed

        context_matching_features = tf.einsum("qwf,qpdf->qpwd", q_encoded, encoded_context)
        word_features = tf.concat([word_features, context_matching_features], axis=3)

        if self.question_features is not None:
            with tf.variable_scope("question_features"):
                q_embed = self.question_features.apply(is_train, q_embed, q_mask)

        # (batch, paragraph, word, features) -> (batch*paragraph, word, features)
        word_features = tf.reshape(word_features, (-1, word_dim, word_features.shape.as_list()[-1]))
        q_embed = tf.tile(tf.expand_dims(q_embed, 1), [1, para_dim, 1, 1])
        q_embed = tf.reshape(q_embed, (-1, word_dim, q_embed.shape.as_list()[-1]))

        with tf.variable_scope("merge"):
            combined_fe = self.merge_with_features.apply(is_train, q_embed, word_features)

        flattened_mask = tf.reshape(tf.tile(tf.expand_dims(q_mask, 1), [1, para_dim]), (-1,))
        flattened_mask *= tf.cast(tf.reshape(tf.sequence_mask(n_paragraphs, para_dim), (-1,)), tf.int32)

        if self.map_joint is not None:
            with tf.variable_scope("map_joint"):
                combined_fe = self.map_joint.apply(is_train, combined_fe, flattened_mask)

        with tf.variable_scope("reduce_word_features"):
            combined_fe = self.encode_joint_features.apply(is_train, combined_fe, flattened_mask)

            # (batch*paragraph, features) -> (batch, paragraph, features)
            combined_fe = tf.reshape(combined_fe, (batch, para_dim, combined_fe.shape.as_list()[-1]))
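The reshapes above form a flatten/unflatten round trip: the batch and paragraph axes are merged so downstream layers see plain (batch*paragraph, word, features) sequences, then split apart again at the end. A minimal sketch with hypothetical sizes:

import tensorflow as tf

batch, para, word, feat = 2, 3, 5, 7
x = tf.ones((batch, para, word, feat))
flat = tf.reshape(x, (-1, word, feat))          # (batch*para, word, feat)
restored = tf.reshape(flat, (batch, para, -1))  # undo the merge afterwards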
github nityansuman / tensorhub / nlp / machine_translation.py
def call(self, x, hidden, enc_output):
        # Attention on encoder output
        context_vector, attention_weights = self.attention(hidden, enc_output)

        # Embedding layer
        x = self.embedding(x)

        # Concatenate the attention context vector with the target-sequence embedding
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)

        # Passing the concatenated vector to the decoder
        output, state = self.decoder_layer(x)

        # Reshape output
        output = tf.reshape(output, (-1, output.shape[2]))

        # Pass through fully connected layer
        x = self.fc(output)
        return x, state, attention_weights
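Here tf.reshape drops the singleton time axis so the decoder output fits the fully connected layer. A minimal sketch with hypothetical sizes:

import tensorflow as tf

output = tf.ones((4, 1, 256))                       # (batch, time=1, units) from the RNN
output = tf.reshape(output, (-1, output.shape[2]))  # (batch, units) for the Dense layer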
github paruby / Wasserstein-Auto-Encoders / models.py
else:
                real_input = real_im
                out_input = out_im

            height = int(out_input.get_shape()[1])
            width = int(out_input.get_shape()[2])
            channels = int(out_input.get_shape()[-1])
            n_filters = model.opts['adversarial_cost_n_filters']
            adversarial_cost = 0

            for kernel_size in [3,4,5]:
                w = tf.get_variable('adv_filter_%d' % kernel_size,
                                    [(kernel_size**2) * channels, n_filters],
                                    initializer=tf.truncated_normal_initializer(stddev=0.01))
                w = tf.nn.l2_normalize(w, 0)
                w = tf.reshape(w, [kernel_size, kernel_size, channels, n_filters])

                bias = tf.get_variable('adv_bias_%d' % kernel_size,
                                       [n_filters],
                                       initializer=tf.constant_initializer(0.001))

                fake_img_repr = tf.nn.conv2d(out_input, w, strides=[1,1,1,1], padding="SAME")
                fake_img_repr = tf.nn.bias_add(fake_img_repr, bias)

                real_img_repr = tf.nn.conv2d(real_input, w, strides=[1,1,1,1], padding="SAME")
                real_img_repr = tf.nn.bias_add(real_img_repr, bias)

                sq_diff = (real_img_repr - fake_img_repr)**2
                sq_diff = tf.reduce_mean(sq_diff, axis=[0,3]) # mean over batch and channels
                sq_diff = tf.reduce_sum(sq_diff)
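The reshape above converts a flat (kernel_size**2 * channels, n_filters) matrix, which is convenient for per-column L2 normalization, into the (height, width, in_channels, out_channels) kernel layout that tf.nn.conv2d expects. A minimal sketch with hypothetical sizes:

import tensorflow as tf

k, channels, n_filters = 3, 3, 16
w = tf.ones((k * k * channels, n_filters))           # flat: easy to L2-normalize per column
w = tf.nn.l2_normalize(w, 0)
kernel = tf.reshape(w, (k, k, channels, n_filters))  # HWIO layout for tf.nn.conv2d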
github haowen-xu / tfsnippet / tfsnippet / distributions / mixture.py
compute_density=None, name=None):
        if is_reparameterized and not self.is_reparameterized:
            raise RuntimeError('{} is not re-parameterized.'.format(self))

        #######################################################################
        # slow routine: generate the mixture by one_hot * stack([c.sample()]) #
        #######################################################################
        with tf.name_scope(name or 'Mixture.sample'):
            cat = self.categorical.sample(n_samples, group_ndims=0)
            mask = tf.one_hot(cat, self.n_components, dtype=self.dtype, axis=-1)
            if self.value_ndims > 0:
                static_shape = (mask.get_shape().as_list() +
                                [1] * self.value_ndims)
                dynamic_shape = concat_shapes([get_shape(mask),
                                               [1] * self.value_ndims])
                mask = tf.reshape(mask, dynamic_shape)
                mask.set_shape(static_shape)
            mask = tf.stop_gradient(mask)

            # derive the mixture samples
            c_samples = [
                c.sample(n_samples, group_ndims=0)
                for c in self.components
            ]
            samples = tf.reduce_sum(
                mask * tf.stack(c_samples, axis=-self.value_ndims - 1),
                axis=-self.value_ndims - 1
            )

            if not self.is_reparameterized:
                samples = tf.stop_gradient(samples)
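The reshape here appends singleton dimensions to the one-hot mask so it broadcasts against component samples that carry value_ndims trailing event dimensions. A minimal sketch, assuming value_ndims == 2 and hypothetical sizes:

import tensorflow as tf

mask = tf.one_hot([0, 2, 1], depth=3)          # (batch, n_components)
mask = tf.reshape(mask, (3, 3, 1, 1))          # append 1s for value_ndims == 2
samples = tf.ones((3, 3, 4, 4))                # (batch, n_components, d1, d2)
mixed = tf.reduce_sum(mask * samples, axis=1)  # select one component per sample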
github jparkhill / TensorMol / TensorMol / RawEmbeddings.py
eles_: a neles X 1 tensor of the elements present in the data.
	        SFPsR_: a symmetry function parameter for the radial part.
	        Rr_cut: radial cutoff of the radial part.
	        eleps_: a nelepairs X 2 tensor of the element pairs present in the data.
	        SFPsA_: a symmetry function parameter for the angular part.
	        RA_cut: radial cutoff of the angular part.

	Returns:
	        Digested mol, in the shape nmol X maxnatom X (dimension of radial part + dimension of angular part).
	"""
	inp_shp = tf.shape(R)
	nmol = inp_shp[0]
	natom = inp_shp[1]
	nele = tf.shape(eles_)[0]
	nelep = tf.shape(eleps_)[0]
	GMR = tf.reshape(TFSymRSet_Update2(R, Zs, eles_, SFPsR_, eta, Rr_cut), [nmol, natom, -1])
	GMA = tf.reshape(TFSymASet_Update2(R, Zs, eleps_, SFPsA_, zeta,  eta, Ra_cut), [nmol, natom, -1])
	GM = tf.concat([GMR, GMA], axis=2)
	num_ele, num_dim = eles_.get_shape().as_list()
	MaskAll = tf.equal(tf.reshape(Zs,[nmol,natom,1]),tf.reshape(eles_,[1,1,nele]))
	ToMask1 = AllSinglesSet(tf.cast(tf.tile(tf.reshape(tf.range(natom),[1,natom]),[nmol,1]),dtype=tf.int64), prec=tf.int64)
	v = tf.cast(tf.reshape(tf.range(nmol*natom), [nmol, natom, 1]), dtype=tf.int64)
	ToMask = tf.concat([ToMask1, v], axis = -1)
	IndexList = []
	SymList = []
	GatherList = []
	for e in range(num_ele):
		GatherList.append(tf.boolean_mask(ToMask,tf.reshape(tf.slice(MaskAll,[0,0,e],[nmol,natom,1]),[nmol, natom])))
		NAtomOfEle=tf.shape(GatherList[-1])[0]
		SymList.append(tf.gather_nd(GM, tf.slice(GatherList[-1],[0,0],[NAtomOfEle,2])))
		mol_index = tf.reshape(tf.slice(GatherList[-1],[0,0],[NAtomOfEle,1]),[NAtomOfEle, 1])
		atom_index = tf.reshape(tf.slice(GatherList[-1],[0,2],[NAtomOfEle,1]),[NAtomOfEle, 1])
github ConvLab / ConvLab / convlab / modules / word_dst / multiwoz / mdbt_util.py View on Github external
def get_metrics(predictions, true_predictions, no_turns, mask, num_slots):
    mask = tf.reshape(mask, [-1, num_slots])
    correct_prediction = tf.cast(tf.equal(predictions, true_predictions), "float32") * mask

    num_positives = tf.reduce_sum(true_predictions)
    classified_positives = tf.reduce_sum(predictions)

    true_positives = tf.multiply(predictions, true_predictions)
    num_true_positives = tf.reduce_sum(true_positives)

    recall = num_true_positives / num_positives
    precision = num_true_positives / classified_positives
    f_score = (2 * recall * precision) / (recall + precision)
    accuracy = tf.reduce_sum(correct_prediction) / (tf.cast(tf.reduce_sum(no_turns), dtype="float32") * num_slots)

    return precision, recall, f_score, accuracy
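The reshape at the top of get_metrics flattens the turn axis so each (turn, slot) prediction is scored as one row. A minimal sketch with hypothetical sizes:

import tensorflow as tf

num_slots = 4
mask = tf.ones((2, 3, num_slots))         # (batch, turns, slots)
mask = tf.reshape(mask, (-1, num_slots))  # (batch*turns, slots), one row per turn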
github stevezheng23 / reading_comprehension_tf / reading_comprehension / layer / recurrent.py
state_list = []
            for i in range(self.num_layer):
                state_list.append(_extract_hidden_state(fwd_state[i], self.cell_type))
                state_list.append(_extract_hidden_state(bwd_state[i], self.cell_type))

            final_state_recurrent = tf.concat(state_list, axis=-1)
            final_state_mask = tf.squeeze(tf.reduce_max(input_mask, axis=1, keepdims=True), axis=1)
            
            if shape_size > 3:
                output_recurrent_shape = tf.shape(output_recurrent)
                output_mask_shape = tf.shape(output_mask)
                final_state_recurrent_shape = tf.shape(final_state_recurrent)
                final_state_mask_shape = tf.shape(final_state_mask)
                output_recurrent = tf.reshape(output_recurrent,
                    shape=tf.concat([input_data_shape[:-2], output_recurrent_shape[-2:]], axis=0))
                output_mask = tf.reshape(output_mask,
                    shape=tf.concat([input_mask_shape[:-2], output_mask_shape[-2:]], axis=0))
                final_state_recurrent = tf.reshape(final_state_recurrent,
                    shape=tf.concat([input_data_shape[:-2], final_state_recurrent_shape[-1:]], axis=0))
                final_state_mask = tf.reshape(final_state_mask,
                    shape=tf.concat([input_mask_shape[:-2], final_state_mask_shape[-1:]], axis=0))
        
        return output_recurrent, output_mask, final_state_recurrent, final_state_mask
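The block above restores leading batch dimensions that were flattened before the RNN ran, building each target shape dynamically from tf.shape and tf.concat rather than hard-coding it. A minimal sketch of that round trip with hypothetical sizes:

import tensorflow as tf

x = tf.ones((2, 3, 5, 8))  # rank-4 input: (d0, d1, length, units)
flat = tf.reshape(x, tf.concat([[-1], tf.shape(x)[-2:]], axis=0))  # (6, 5, 8)
restored = tf.reshape(flat,
    tf.concat([tf.shape(x)[:-2], tf.shape(flat)[-2:]], axis=0))    # (2, 3, 5, 8)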
github salu133445 / musegan / v2 / musegan / musegan / components.py
config['net_g']['bar_merged'][-1][1][0] = config['num_track']

            nets['bar_merged'] = NeuralNet(
                tf.concat([nets['bar_pitch_time'].tensor_out,
                           nets['bar_time_pitch'].tensor_out], -1),
                config['net_g']['bar_merged'], name='bar_merged'
            )

            tensor_out = nets['bar_merged'].tensor_out

        # Private bar generator mode
        elif config['net_g']['bar_generator_type'] == 'private':
            # Tile private latent vector along time axis
            if 'private' in self.tensor_in:
                tiled_private = [
                    tf.reshape(
                        tf.tile(self.tensor_in['private'][..., idx], (1, 4)),
                        (-1, 4, self.tensor_in['private'].get_shape()[1])
                    )
                    for idx in range(config['num_track'])
                ]

            # Define private temporal generator
            if 'temporal_private' in self.tensor_in:
                nets['temporal_private'] = [
                    NeuralNet(self.tensor_in['temporal_private'][..., idx],
                              config['net_g']['temporal_private'],
                              name='temporal_private_'+str(idx))
                    for idx in range(config['num_track'])
                ]

            # Get the final input for each bar generator
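tiled_private above repeats each track's latent vector along a new time axis: tf.tile along the feature axis followed by tf.reshape copies the vector once per bar. A minimal sketch with hypothetical sizes:

import tensorflow as tf

z = tf.ones((2, 16))                     # (batch, latent_dim) for one track
tiled = tf.tile(z, (1, 4))               # (batch, 4*latent_dim)
z_time = tf.reshape(tiled, (-1, 4, 16))  # (batch, 4 bars, latent_dim)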
github tensorflow / models / research / neural_gpu / neural_gpu.py
# Final convolution to get logits, list outputs.
          outputs = tf.matmul(tf.reshape(outputs, [-1, nmaps]), output_w)
          outputs = tf.reshape(outputs, [length, batch_size, noclass])
        gpu_outputs[gpu] = tf.nn.softmax(outputs)

        # Calculate cross-entropy loss and normalize it.
        targets_soft = make_dense(tf.squeeze(gpu_target[gpu], [1]),
                                  noclass, 0.1)
        targets_soft = tf.reshape(targets_soft, [-1, noclass])
        targets_hard = make_dense(tf.squeeze(gpu_target[gpu], [1]),
                                  noclass, 0.0)
        targets_hard = tf.reshape(targets_hard, [-1, noclass])
        output = tf.transpose(outputs, [1, 0, 2])
        xent_soft = tf.reshape(tf.nn.softmax_cross_entropy_with_logits(
            logits=tf.reshape(output, [-1, noclass]), labels=targets_soft),
                               [batch_size, length])
        xent_hard = tf.reshape(tf.nn.softmax_cross_entropy_with_logits(
            logits=tf.reshape(output, [-1, noclass]), labels=targets_hard),
                               [batch_size, length])
        low, high = 0.1 / float(noclass - 1), 0.9
        const = high * tf.log(high) + float(noclass - 1) * low * tf.log(low)
        weight_sum = tf.reduce_sum(weights) + 1e-20
        true_perp = tf.reduce_sum(xent_hard * weights) / weight_sum
        soft_loss = tf.reduce_sum(xent_soft * weights) / weight_sum
        perp_loss = soft_loss + const
        # Final loss: cross-entropy + shared parameter relaxation part + extra.
        mem_loss = 0.5 * tf.reduce_mean(mem_loss) / length_float
        total_loss = perp_loss + mem_loss
        gpu_losses[gpu].append(true_perp)

        # Gradients.
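Both reshapes around the matmul follow the standard pattern: collapse the leading axes so a 2-D weight multiply applies position-wise, then restore (length, batch) afterwards. A minimal sketch with hypothetical sizes:

import tensorflow as tf

length, batch_size, nmaps, noclass = 10, 4, 32, 5
outputs = tf.ones((length, batch_size, nmaps))
output_w = tf.ones((nmaps, noclass))
logits = tf.matmul(tf.reshape(outputs, (-1, nmaps)), output_w)  # (length*batch, noclass)
logits = tf.reshape(logits, (length, batch_size, noclass))      # restore time and batch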