How to use the tensorflow.shape function in TensorFlow

To help you get started, we’ve selected a few TensorFlow examples, based on popular ways tensorflow.shape is used in public projects.

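At its core, tf.shape(t) returns the dynamic shape of t as a 1-D int32 tensor evaluated at run time, while t.get_shape() returns the static shape known when the graph is built. A minimal TF 1.x-style sketch of the difference (the placeholder shape is illustrative):

import tensorflow as tf

# A batch of images whose batch dimension is unknown at graph time
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 3])

static_shape = x.get_shape().as_list()  # [None, 28, 28, 3]; batch is None
dynamic_shape = tf.shape(x)             # int32 tensor, resolved when run

# The dynamic batch size can be used inside other ops
batch_size = tf.shape(x)[0]
flattened = tf.reshape(x, [batch_size, -1])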

github singnet / semantic-vision / experiments / invariance / baseline / autoencoders2 / models / x86_z20_rs49 / train.py (View on Github)
    rec_real_z = reconstructor_tf(real_z, angles_tf, reuse=True)

    # reconstruction loss (decoder cost)
    r_cost = tf.losses.mean_squared_error(real_data_rot, rec_data)

    if is_aae:
        # for our discriminator
        # encoded_data is a fake_z
        fake_z = encoded_data
        d_on_real_data = discriminator_tf(real_z, reuse=False)
        d_on_fake_data = discriminator_tf(fake_z, reuse=True)

        # one interpolation coefficient per example; tf.shape supplies the
        # batch size, which is unknown at graph-construction time
        alpha = tf.random_uniform(shape=[tf.shape(fake_z)[0], 1, 1, 1], minval=0., maxval=1.)
        interpolates = real_z + alpha * (fake_z - real_z)

        gradients = tf.gradients(discriminator_tf(interpolates, reuse=True), [interpolates])[0]
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
        gradient_penalty = tf.reduce_mean((slopes - 1) ** 2)

        # "generator" loss (it is also our encoder)
        e_cost = -tf.reduce_mean(d_on_fake_data)

        # discriminator loss
        d_cost = tf.reduce_mean(d_on_fake_data) - tf.reduce_mean(d_on_real_data) + LAMBDA * gradient_penalty
        d_param = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator')
        e_param = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Encoder')

        d_train_op = tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE).minimize(d_cost, var_list=d_param)
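
The tf.shape call above is the load-bearing piece: it reads the batch size at run time so one interpolation coefficient is drawn per example, as WGAN-GP requires. A stripped-down sketch of just that idiom (the [None, 4, 4, 20] latent shape is an assumption for illustration):

import tensorflow as tf

real_z = tf.placeholder(tf.float32, [None, 4, 4, 20])
fake_z = tf.placeholder(tf.float32, [None, 4, 4, 20])

# One alpha per example; the batch size comes from the dynamic shape
alpha = tf.random_uniform(shape=[tf.shape(fake_z)[0], 1, 1, 1],
                          minval=0., maxval=1.)
interpolates = real_z + alpha * (fake_z - real_z)
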
github 0x5eba / Skin-Cancer-Segmentation / Mask / model.py (View on Github)
    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
                tf.gather(pre_nms_rois, ixs),
                tf.gather(pre_nms_scores, ixs),
                max_output_size=config.DETECTION_MAX_INSTANCES,
                iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep
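
The reusable pattern here is padding a variable-length result to a fixed size so tf.map_fn can stack the per-class outputs: tf.shape supplies the runtime length, tf.pad fills the gap, and set_shape restores a static shape. A minimal sketch, with MAX_INSTANCES standing in for config.DETECTION_MAX_INSTANCES:

import tensorflow as tf

MAX_INSTANCES = 100  # stand-in for config.DETECTION_MAX_INSTANCES

def pad_to_fixed(indices):
    # indices: 1-D int32 tensor of variable length
    gap = MAX_INSTANCES - tf.shape(indices)[0]
    padded = tf.pad(indices, [(0, gap)], mode='CONSTANT', constant_values=-1)
    padded.set_shape([MAX_INSTANCES])  # lets map_fn infer the result shape
    return padded
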
github dvornikita / blitznet / detector.py (View on Github)
        self.seg_ph = tf.placeholder(shape=[None, None], dtype=tf.int32, name='seg_ph')

        img = tf.image.resize_bilinear(tf.expand_dims(self.image_ph, 0),
                                       (img_size, img_size))
        self.net.create_trunk(img)

        if args.detect:
            self.net.create_multibox_head(self.loader.num_classes)
            confidence = tf.nn.softmax(tf.squeeze(self.net.outputs['confidence']))
            location = tf.squeeze(self.net.outputs['location'])
            self.nms(location, confidence, self.bboxer.tiling)

        if args.segment:
            self.net.create_segmentation_head(self.loader.num_classes)
            self.segmentation = self.net.outputs['segmentation']
            seg_shape = tf.shape(self.image_ph)[:2]
            self.segmentation = tf.image.resize_bilinear(self.segmentation, seg_shape)

            self.segmentation = tf.cast(tf.argmax(tf.squeeze(self.segmentation), axis=-1), tf.int32)
            self.segmentation = tf.reshape(self.segmentation, seg_shape)
            self.segmentation.set_shape([None, None])

            if not self.no_gt:
                easy_mask = self.seg_ph <= self.loader.num_classes
                predictions = tf.boolean_mask(self.segmentation, easy_mask)
                labels = tf.boolean_mask(self.seg_ph, easy_mask)
                self.mean_iou, self.iou_update = mean_iou(predictions, labels, self.loader.num_classes)
            else:
                self.mean_iou = tf.constant(0)
                self.iou_update = tf.constant(0)
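
tf.shape(self.image_ph)[:2] recovers the input image's height and width at run time, so the low-resolution segmentation logits can be resized back to the original resolution even though that resolution is None statically. A condensed sketch (the placeholder shapes and the 21-class count are illustrative):

import tensorflow as tf

image_ph = tf.placeholder(tf.float32, [None, None, 3])
logits = tf.placeholder(tf.float32, [1, 75, 75, 21])  # low-res class scores

# Read the original spatial size at run time and upsample back to it
orig_hw = tf.shape(image_ph)[:2]
upsampled = tf.image.resize_bilinear(logits, orig_hw)
prediction = tf.cast(tf.argmax(tf.squeeze(upsampled), axis=-1), tf.int32)
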
github iwyoo / tf_ThinPlateSpline / ThinPlateSpline2.py (View on Github)
def _meshgrid(height, width, source):
    x_t = tf.tile(
      tf.reshape(tf.linspace(-1.0, 1.0, width), [1, width]), [height, 1])
    y_t = tf.tile(
      tf.reshape(tf.linspace(-1.0, 1.0, height), [height, 1]), [1, width])

    x_t_flat = tf.reshape(x_t, (1, 1, -1))
    y_t_flat = tf.reshape(y_t, (1, 1, -1))

    num_batch = tf.shape(source)[0]
    px = tf.expand_dims(source[:,:,0], 2) # [bn, pn, 1]
    py = tf.expand_dims(source[:,:,1], 2) # [bn, pn, 1]
    d2 = tf.square(x_t_flat - px) + tf.square(y_t_flat - py)
    r = d2 * tf.log(d2 + 1e-6) # [bn, pn, h*w]
    x_t_flat_g = tf.tile(x_t_flat, tf.stack([num_batch, 1, 1])) # [bn, 1, h*w]
    y_t_flat_g = tf.tile(y_t_flat, tf.stack([num_batch, 1, 1])) # [bn, 1, h*w]
    ones = tf.ones_like(x_t_flat_g) # [bn, 1, h*w]

    grid = tf.concat([ones, x_t_flat_g, y_t_flat_g, r], 1) # [bn, 3+pn, h*w]
    return grid
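
In this excerpt tf.shape(source)[0] feeds tf.tile via tf.stack, broadcasting a grid shared across the batch to every example even though the batch dimension is unknown statically. Reduced to its core (the point count and grid width are illustrative):

import tensorflow as tf

source = tf.placeholder(tf.float32, [None, 16, 2])  # [batch, points, xy]
row = tf.reshape(tf.linspace(-1.0, 1.0, 10), (1, 1, -1))  # shared 1 x 1 x 10 row

# Repeat the shared row once per example in a batch of unknown size
num_batch = tf.shape(source)[0]
row_per_example = tf.tile(row, tf.stack([num_batch, 1, 1]))  # [batch, 1, 10]
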
github shaohua0116 / demo2program / models / model_induction_baseline.py (View on Github)
                projection_layer = tf.layers.Dense(  # assumed Dense layer; the excerpt begins mid-statement
                    token_dim, use_bias=False, name="output_projection")
                if init_state is None:
                    init_state = rnn.LSTMStateTuple(visual_c, visual_h)
                decoder = seq2seq.BasicDecoder(
                    lstm_cell, helper, init_state,
                    output_layer=projection_layer)
                # pred_length [batch_size]: length of the predicted sequence
                outputs, final_context_state, pred_length = seq2seq.dynamic_decode(
                    decoder, maximum_iterations=max_sequence_len,
                    scope='dynamic_decoder')
                pred_length = tf.expand_dims(pred_length, axis=1)

                # as dynamic_decode generates variable-length sequence output,
                # we pad it dynamically to match the input embedding shape.
                rnn_output = outputs.rnn_output
                sz = tf.shape(rnn_output)
                dynamic_pad = tf.zeros(
                    [sz[0], max_sequence_len - sz[1], sz[2]],
                    dtype=rnn_output.dtype)
                pred_seq = tf.concat([rnn_output, dynamic_pad], axis=1)
                seq_shape = pred_seq.get_shape().as_list()
                pred_seq.set_shape(
                    [seq_shape[0], max_sequence_len, seq_shape[2]])

                pred_seq = tf.transpose(
                    tf.reshape(pred_seq,
                               [self.batch_size, max_sequence_len, -1]),
                    [0, 2, 1])  # make_dim: [bs, n, len]
                return pred_seq, pred_length, final_context_state
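
The dynamic padding above generalizes to any variable-length sequence output: read the batch, time, and feature sizes from tf.shape, build a zero block covering the missing time steps, and concatenate along the time axis. A minimal sketch, with MAX_LEN standing in for max_sequence_len:

import tensorflow as tf

MAX_LEN = 32  # stand-in for max_sequence_len
rnn_output = tf.placeholder(tf.float32, [None, None, 256])  # [batch, time, dim]

# Pad the time axis out to MAX_LEN using the runtime sizes
sz = tf.shape(rnn_output)
dynamic_pad = tf.zeros([sz[0], MAX_LEN - sz[1], sz[2]], dtype=rnn_output.dtype)
pred_seq = tf.concat([rnn_output, dynamic_pad], axis=1)
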
github hccho2 / Tacotron-Wavenet-Vocoder-Korean / wavenet / model.py (View on Github)
        current_layer = input_batch  # length already reduced by 1 by the causal cut
        if self.train_mode == False:
            self.causal_queue = tf.scatter_update(self.causal_queue, tf.range(self.batch_size),
                                                  tf.concat([self.causal_queue[:, 1:, :], input_batch], axis=1))
            current_layer = self.causal_queue

            self.local_condition_queue = tf.scatter_update(self.local_condition_queue, tf.range(self.batch_size),
                                                           tf.concat([self.local_condition_queue[:, 1:, :], local_condition_batch], axis=1))
            local_condition_batch = self.local_condition_queue

        # Pre-process the input with a regular convolution
        current_layer = self._create_causal_layer(current_layer)  # the conv1d shrinks the length by another (filter_width - 1) = 1

        # output_width below is at most SAMPLE_SIZE = 100,000; it can be smaller
        # for short files or near the end of a file
        if self.train_mode == True:
            # length after all dilated convolutions; the +1 restores the sample removed by the causal cut
            output_width = tf.shape(input_batch)[1] - self.receptive_field + 1
        else:
            output_width = 1

        # Add all defined dilation layers.
        with tf.variable_scope('dilated_stack'):
            for layer_index, dilation in enumerate(self.dilations):  # e.g. [1, 2, 4, ..., 512] repeated five times
                with tf.variable_scope('layer{}'.format(layer_index)):

                    if self.train_mode == False:
                        self.dilation_queue[layer_index] = tf.scatter_update(self.dilation_queue[layer_index], tf.range(self.batch_size),
                                                                             tf.concat([self.dilation_queue[layer_index][:, 1:, :], current_layer], axis=1))
                        current_layer = self.dilation_queue[layer_index]

                    output, current_layer = self._create_dilation_layer(current_layer, layer_index, dilation,
                                                                        local_condition_batch, global_condition_batch, output_width)
                    outputs.append(output)
        with tf.name_scope('postprocessing'):
            # Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
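
In training mode, tf.shape(input_batch)[1] gives the waveform length at run time, from which the valid output width after the causal and dilated convolutions is derived. Isolated, with an assumed receptive-field constant:

import tensorflow as tf

RECEPTIVE_FIELD = 1024  # assumed; determined by the dilation stack
input_batch = tf.placeholder(tf.float32, [None, None, 1])  # [batch, time, channels]

# Positions that survive all dilated convolutions; +1 restores the causal cut
output_width = tf.shape(input_batch)[1] - RECEPTIVE_FIELD + 1
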
github stu92054 / Domain-adaptation-on-segmentation / Adapt_Road_Scene / models / model_static_normalized.py (View on Github)
    def _upscore_layer(self, bottom, shape,
                       num_classes, name, debug,
                       ksize=4, stride=2):
        strides = [1, stride, stride, 1]
        with tf.variable_scope(name):
            in_features = bottom.get_shape()[3].value

            if shape is None:
                # Compute shape out of Bottom
                in_shape = tf.shape(bottom)

                h = ((in_shape[1] - 1) * stride) + 1
                w = ((in_shape[2] - 1) * stride) + 1
                new_shape = [in_shape[0], h, w, num_classes]
            else:
                new_shape = [int(shape[0]), int(shape[1]), int(shape[2]), num_classes]
            
            logging.debug("Layer: %s, Fan-in: %d" % (name, in_features))
            f_shape = [ksize, ksize, num_classes, in_features]

            # create
            num_input = ksize * ksize * in_features / stride
            stddev = (2.0 / num_input) ** 0.5  # float literal guards against integer division

            weights = self.get_deconv_filter('upsample',f_shape)
            self._add_wd_and_summary(weights, self.wd, "fc_wlosses")
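
The shape is None branch shows the standard recipe for sizing a transposed convolution when the input resolution is unknown at graph time: derive the output height and width from tf.shape(bottom). A minimal sketch (the filter size, channel counts, and variable name are illustrative, not this project's API):

import tensorflow as tf

bottom = tf.placeholder(tf.float32, [None, None, None, 64])
num_classes, stride = 21, 2
weights = tf.get_variable('up_filter', shape=[4, 4, num_classes, 64])

# Output spatial size derived from the input's dynamic shape
in_shape = tf.shape(bottom)
h = (in_shape[1] - 1) * stride + 1
w = (in_shape[2] - 1) * stride + 1
new_shape = tf.stack([in_shape[0], h, w, num_classes])
deconv = tf.nn.conv2d_transpose(bottom, weights, new_shape,
                                strides=[1, stride, stride, 1], padding='SAME')
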
github tensorflow / tensor2tensor / tensor2tensor / models / research / universal_transformer_util.py (View on Github)
    ValueError: Unknown act type
  """
  if hparams.act_type not in ["basic", "global", "random", "accumulated"]:
    raise ValueError("Unknown act type: %s" % hparams.act_type)

  state = x
  act_max_steps = hparams.act_max_steps
  threshold = 1.0 - hparams.act_epsilon
  state_shape_static = state.get_shape()

  state_slice = slice(0, 2)
  if hparams.act_type == "global":
    state_slice = slice(0, 1)

  # Dynamic shape for update tensors below
  update_shape = tf.shape(state)[state_slice]

  # Halting probabilities (p_t^n in the paper)
  halting_probability = tf.zeros(update_shape, name="halting_probability")

  # Remainders (R(t) in the paper)
  remainders = tf.zeros(update_shape, name="remainder")

  # Number of updates performed (N(t) in the paper)
  n_updates = tf.zeros(update_shape, name="n_updates")

  # Previous cell states (s_t in the paper)
  previous_state = tf.zeros_like(state, name="previous_state")
  step = tf.constant(0, dtype=tf.int32)

  def ut_function(state, step, halting_probability, remainders, n_updates,
                  previous_state):
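
Slicing tf.shape(state) yields a shape tensor for just the leading dimensions, so the Adaptive Computation Time bookkeeping tensors (halting probabilities, update counts) can be allocated per batch entry and position at run time. A minimal sketch of that idiom:

import tensorflow as tf

state = tf.placeholder(tf.float32, [None, None, 512])  # [batch, length, hidden]

# One scalar per (batch, position), sized from the dynamic shape
update_shape = tf.shape(state)[0:2]
halting_probability = tf.zeros(update_shape, name="halting_probability")
n_updates = tf.zeros(update_shape, name="n_updates")
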
github tensorflow / addons / tensorflow_addons / image / transform_ops.py (View on Github)
    Returns:
      3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the
        *output coordinates* (in homogeneous coordinates) of each transform to
        the corresponding *input coordinates*.

    Raises:
      ValueError: If `transforms` have an invalid shape.
    """
    with tf.name_scope(name or "flat_transforms_to_matrices"):
        transforms = tf.convert_to_tensor(transforms, name="transforms")
        if transforms.shape.ndims not in (1, 2):
            raise ValueError(
                "Transforms should be 1D or 2D, got: %s" % transforms)
        # Make the transform(s) 2D in case the input is a single transform.
        transforms = tf.reshape(transforms, tf.constant([-1, 8]))
        num_transforms = tf.shape(transforms)[0]
        # Add a column of ones for the implicit last entry in the matrix.
        return tf.reshape(
            tf.concat([transforms, tf.ones([num_transforms, 1])], axis=1),
            tf.constant([-1, 3, 3]))
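
tf.shape(transforms)[0] counts the transforms at run time so a column of ones, the implicit ninth matrix entry, can be appended before reshaping to (N, 3, 3). Condensed to the tf.shape-dependent steps:

import tensorflow as tf

transforms = tf.placeholder(tf.float32, [None, 8])  # N flat 8-element transforms

# Append the implicit last entry (1) to each transform, then form 3x3 matrices
num_transforms = tf.shape(transforms)[0]
matrices = tf.reshape(
    tf.concat([transforms, tf.ones([num_transforms, 1])], axis=1),
    [-1, 3, 3])
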
github iyah4888 / SIGGRAPH18SSS / deeplab_resnet / hc_deeplab.py (View on Github)
	def lossfunction(self, tweightmat, tindicator, tembeddings):

		with tf.variable_scope('loss_computation') as scope:
			# tembeddings: #pts x 64
			sqrvals = tf.reduce_sum(tf.square(tembeddings), 1, keep_dims=True)
			# sqrvals: #pts x 1
			sqrvalsmat = tf.tile(sqrvals, [1, tf.shape(sqrvals)[0]])
			sqrvalsmat2 = tf.add(sqrvalsmat,tf.transpose(sqrvalsmat))
			distmat =  tf.add(sqrvalsmat2, tf.scalar_mul(-2.0, tf.matmul(tembeddings,  tf.transpose(tembeddings))))/64.0

			sigmamat = tf.scalar_mul(2.0, tf.reciprocal(1.0+tf.exp(distmat)))
			posnegmapping = tf.log(tf.add(tf.scalar_mul(0.5, 1.0-tindicator), tf.multiply(tindicator, sigmamat)))
			wcrossentropy = tf.multiply(tf.negative(tindicator+2.0), posnegmapping)
			lossval = tf.reduce_mean(wcrossentropy)
		return lossval
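
The tiling in this loss hinges on tf.shape(sqrvals)[0], the number of points, which is not known statically: it expands the per-point squared norms into an n x n matrix so pairwise squared distances come out of a single matmul. A minimal sketch of that computation (64-dimensional embeddings, as in the excerpt):

import tensorflow as tf

embeddings = tf.placeholder(tf.float32, [None, 64])  # [num_points, dim]

# Pairwise squared distances, with the point count read at run time
sqr = tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True)  # [n, 1]
sqr_mat = tf.tile(sqr, [1, tf.shape(sqr)[0]])                  # [n, n]
dist = (sqr_mat + tf.transpose(sqr_mat)
        - 2.0 * tf.matmul(embeddings, tf.transpose(embeddings)))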