How to use the ops.conv1d function from the ops module

To help you get started, we’ve selected a few ops examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

GitHub: indiejoseph / chinese-char-rnn / models / bytenet.py (view on GitHub, external link)
def decode_layer(self, input_, dilation, layer_no):
    """One ByteNet decoder residual block: ReLU -> 1x1 conv -> ReLU ->
    causal dilated conv -> ReLU -> 1x1 conv, with a residual skip.

    Args:
      input_: activations from the previous decoder layer.
        NOTE(review): the residual add below implies input_ has
        2 * self.residual_channels channels — confirm against the caller.
      dilation: dilation rate for the causal convolution.
      layer_no: layer index, used only to build unique op names.

    Returns:
      input_ + conv2, same shape as input_.
    """
    relu1 = tf.nn.relu(input_, name="dec_relu1_layer{}".format(layer_no))
    conv1 = ops.conv1d(relu1, self.residual_channels, name="dec_conv1d_1_layer{}".format(layer_no))

    # Fixed: op was named "enc_relu2_..." inside the decoder (copy-paste
    # from the encoder) — use the dec_ prefix like every other op here.
    relu2 = tf.nn.relu(conv1, name="dec_relu2_layer{}".format(layer_no))
    dilated_conv = ops.dilated_conv1d(relu2, self.residual_channels,
      dilation, self.decoder_filter_width,
      causal = True,
      # Fixed typo "laye" -> "layer". NOTE(review): if checkpoints were
      # saved under the old scope name, restoring needs a variable remap.
      name = "dec_dilated_conv_layer{}".format(layer_no)
      )

    relu3 = tf.nn.relu(dilated_conv, name="dec_relu3_layer{}".format(layer_no))
    conv2 = ops.conv1d(relu3, 2 * self.residual_channels, name="dec_conv1d_2_layer{}".format(layer_no))

    # Residual connection.
    return input_ + conv2
GitHub: paarthneekhara / convolutional-vqa / Models / text_model_v2.py (view on GitHub, external link)
def _byetenet_residual_block(input_, dilation, layer_no, options, source_mask, train = True):
        """One masked ByteNet encoder residual block.

        Pattern: ReLU -> 1x1 conv -> mask -> ReLU -> dilated conv -> mask ->
        ReLU -> 1x1 conv -> mask, with a residual skip at the end.

        Args:
          input_: activations from the previous layer. NOTE(review): the
            residual add at the end implies input_ has
            2 * options['residual_channels'] channels — confirm.
          dilation: dilation rate for the middle convolution.
          layer_no: layer index, used only to build unique op names.
          options: dict providing 'residual_channels' and 'encoder_filter_width'.
          source_mask: multiplicative mask applied after every conv, zeroing
            positions beyond the source length.
          train: unused except by the commented-out layer_norm calls.
        """
        # input_ = layer_norm(input_, trainable = train)
        relu1 = tf.nn.relu(input_, name = 'enc_relu1_layer{}'.format(layer_no))
        conv1 = ops.conv1d(relu1, options['residual_channels'], name = 'enc_conv1d_1_layer{}'.format(layer_no))
        # conv1 = layer_norm(conv1, trainable = train)
        conv1 = conv1 * source_mask
        relu2 = tf.nn.relu(conv1, name = 'enc_relu2_layer{}'.format(layer_no))
        # NOTE(review): this calls ops.conv1d with dilation / filter-width /
        # causal arguments, while sibling implementations use
        # ops.dilated_conv1d for the same step — verify that this project's
        # conv1d actually accepts these parameters (possible bug otherwise).
        dilated_conv = ops.conv1d(relu2, options['residual_channels'], 
            dilation, options['encoder_filter_width'],
            causal = True, 
            name = "enc_dilated_conv_layer{}".format(layer_no)
            )
        # dilated_conv = layer_norm(dilated_conv, trainable = train)
        dilated_conv = dilated_conv * source_mask
        relu3 = tf.nn.relu(dilated_conv, name = 'enc_relu3_layer{}'.format(layer_no))
        conv2 = ops.conv1d(relu3, 2 * options['residual_channels'], name = 'enc_conv1d_2_layer{}'.format(layer_no))
        conv2 = conv2 * source_mask
        # Residual connection.
        return input_ + conv2
GitHub: paarthneekhara / byteNet-tensorflow / ByteNet / model.py (view on GitHub, external link)
def encode_layer(self, input_, dilation, layer_no, last_layer = False):
	"""One (non-causal) ByteNet encoder residual block.

	ReLU -> 1x1 conv -> mask -> ReLU -> dilated conv -> mask ->
	ReLU -> 1x1 conv, finishing with a residual skip connection.
	"""
	opts = self.options
	# Project down to the residual width with a 1x1 convolution.
	down = ops.conv1d(
		tf.nn.relu(input_, name = 'enc_relu1_layer{}'.format(layer_no)),
		opts['residual_channels'],
		name = 'enc_conv1d_1_layer{}'.format(layer_no))
	down = tf.mul(down, self.source_masked_d)
	# Non-causal dilated convolution over the masked activations.
	dilated = ops.dilated_conv1d(
		tf.nn.relu(down, name = 'enc_relu2_layer{}'.format(layer_no)),
		opts['residual_channels'],
		dilation, opts['encoder_filter_width'],
		causal = False,
		name = "enc_dilated_conv_layer{}".format(layer_no))
	dilated = tf.mul(dilated, self.source_masked_d)
	# Project back up to twice the residual width, then add the skip.
	up = ops.conv1d(
		tf.nn.relu(dilated, name = 'enc_relu3_layer{}'.format(layer_no)),
		2 * opts['residual_channels'],
		name = 'enc_conv1d_2_layer{}'.format(layer_no))
	return input_ + up
GitHub: indiejoseph / chinese-char-rnn / models / bytenet.py (view on GitHub, external link)
def encoder(self, input_):
    """Run the full encoder stack over input_ and post-process the result.

    Applies one encode_layer per dilation rate, masking each layer's output
    to the source length, then a final ReLU'd 1x1 conv projection.

    Returns:
      The masked, post-processed encoder output (to be concatenated with
      the target embedding by the decoder).
    """
    curr_input = input_
    # Fixed: was `self.self.encoder_dilations` (double `self`), which
    # raises AttributeError at graph-construction time.
    for layer_no, dilation in enumerate(self.encoder_dilations):
      layer_output = self.encode_layer(curr_input, dilation, layer_no)

      # ENCODE ONLY TILL THE INPUT LENGTH, conditioning should be 0 beyond that
      layer_output = tf.mul(layer_output, self.source_masked, name="layer_{}_output".format(layer_no))

      curr_input = layer_output

    # TO BE CONCATENATED WITH TARGET EMBEDDING
    processed_output = tf.nn.relu( ops.conv1d(tf.nn.relu(layer_output),
      self.residual_channels,
      name="encoder_post_processing") )

    processed_output = tf.mul(processed_output, self.source_masked_d, name="encoder_processed")

    return processed_output
GitHub: paarthneekhara / convolutional-vqa / Models / text_model.py (view on GitHub, external link)
def encoder(self, input_, train = True):
        """Stack every configured encode layer over input_, then apply a
        ReLU'd 1x1 post-processing convolution and return the result."""
        opts = self.options
        curr = input_

        for idx, rate in enumerate(self.options['encoder_dilations']):
            layer_output = self.encode_layer(curr, rate, idx, train)
            curr = layer_output

        post = ops.conv1d(tf.nn.relu(layer_output),
            opts['residual_channels'],
            name = 'encoder_post_processing')
        return tf.nn.relu(post)
GitHub: paarthneekhara / byteNet-tensorflow / ByteNet / model.py (view on GitHub, external link)
def decoder(self, input_, encoder_embedding = None):
		options = self.options
		curr_input = input_
		if encoder_embedding != None:
			# CONDITION WITH ENCODER EMBEDDING FOR THE TRANSLATION MODEL
			curr_input = tf.concat(2, [input_, encoder_embedding])
			print "Decoder Input", curr_input
			

		for layer_no, dilation in enumerate(options['decoder_dilations']):
			layer_output = self.decode_layer(curr_input, dilation, layer_no)
			curr_input = layer_output


		processed_output = ops.conv1d(tf.nn.relu(layer_output), 
			options['n_target_quant'], 
			name = 'decoder_post_processing')

		return processed_output
GitHub: paarthneekhara / byteNet-tensorflow / ByteNet / model.py (view on GitHub, external link)
def decode_layer(self, input_, dilation, layer_no):
	"""One ByteNet decoder residual block: ReLU -> 1x1 conv -> ReLU ->
	causal dilated conv -> ReLU -> 1x1 conv, plus a residual skip.

	NOTE(review): the residual add implies input_ has
	2 * options['residual_channels'] channels — confirm against the caller.
	"""
	options = self.options
	relu1 = tf.nn.relu(input_, name = 'dec_relu1_layer{}'.format(layer_no))
	conv1 = ops.conv1d(relu1, options['residual_channels'], name = 'dec_conv1d_1_layer{}'.format(layer_no))

	# Fixed: op was named "enc_relu2_..." inside the decoder (copy-paste
	# from the encoder) — use the dec_ prefix like the rest of this block.
	relu2 = tf.nn.relu(conv1, name = 'dec_relu2_layer{}'.format(layer_no))
	dilated_conv = ops.dilated_conv1d(relu2, options['residual_channels'], 
		dilation, options['decoder_filter_width'],
		causal = True, 
		# Fixed typo "laye" -> "layer". NOTE(review): checkpoints saved
		# under the old scope name will need a variable remap to restore.
		name = "dec_dilated_conv_layer{}".format(layer_no)
		)

	relu3 = tf.nn.relu(dilated_conv, name = 'dec_relu3_layer{}'.format(layer_no))
	conv2 = ops.conv1d(relu3, 2 * options['residual_channels'], name = 'dec_conv1d_2_layer{}'.format(layer_no))

	# Residual connection.
	return input_ + conv2
GitHub: paarthneekhara / byteNet-tensorflow / ByteNet / model.py (view on GitHub, external link)
def decode_layer(self, input_, dilation, layer_no):
	"""One ByteNet decoder residual block: ReLU -> 1x1 conv -> ReLU ->
	causal dilated conv -> ReLU -> 1x1 conv, plus a residual skip.

	NOTE(review): the residual add implies input_ has
	2 * options['residual_channels'] channels — confirm against the caller.
	"""
	options = self.options
	relu1 = tf.nn.relu(input_, name = 'dec_relu1_layer{}'.format(layer_no))
	conv1 = ops.conv1d(relu1, options['residual_channels'], name = 'dec_conv1d_1_layer{}'.format(layer_no))

	# Fixed: op was named "enc_relu2_..." inside the decoder (copy-paste
	# from the encoder) — use the dec_ prefix like the rest of this block.
	relu2 = tf.nn.relu(conv1, name = 'dec_relu2_layer{}'.format(layer_no))
	dilated_conv = ops.dilated_conv1d(relu2, options['residual_channels'], 
		dilation, options['decoder_filter_width'],
		causal = True, 
		# Fixed typo "laye" -> "layer". NOTE(review): checkpoints saved
		# under the old scope name will need a variable remap to restore.
		name = "dec_dilated_conv_layer{}".format(layer_no)
		)

	relu3 = tf.nn.relu(dilated_conv, name = 'dec_relu3_layer{}'.format(layer_no))
	conv2 = ops.conv1d(relu3, 2 * options['residual_channels'], name = 'dec_conv1d_2_layer{}'.format(layer_no))

	# Residual connection.
	return input_ + conv2
GitHub: paarthneekhara / byteNet-tensorflow / ByteNet / model.py (view on GitHub, external link)
def encode_layer(self, input_, dilation, layer_no, last_layer = False):
	"""One (non-causal) ByteNet encoder residual block.

	ReLU -> 1x1 conv -> mask -> ReLU -> dilated conv -> mask ->
	ReLU -> 1x1 conv, with a residual skip connection at the end.

	Args:
	  input_: previous layer's activations. NOTE(review): the residual
	    add implies input_ has 2 * options['residual_channels'] channels.
	  dilation: dilation rate of the middle convolution.
	  layer_no: layer index, used only to build unique op names.
	  last_layer: unused in this body — presumably kept for interface
	    compatibility with callers; verify before removing.
	"""
	options = self.options
	relu1 = tf.nn.relu(input_, name = 'enc_relu1_layer{}'.format(layer_no))
	conv1 = ops.conv1d(relu1, options['residual_channels'], name = 'enc_conv1d_1_layer{}'.format(layer_no))
	# Zero out positions beyond the source length (old TF API: tf.mul).
	conv1 = tf.mul(conv1, self.source_masked_d)
	relu2 = tf.nn.relu(conv1, name = 'enc_relu2_layer{}'.format(layer_no))
	dilated_conv = ops.dilated_conv1d(relu2, options['residual_channels'], 
		dilation, options['encoder_filter_width'],
		causal = False, 
		name = "enc_dilated_conv_layer{}".format(layer_no)
		)
	dilated_conv = tf.mul(dilated_conv, self.source_masked_d)
	relu3 = tf.nn.relu(dilated_conv, name = 'enc_relu3_layer{}'.format(layer_no))
	conv2 = ops.conv1d(relu3, 2 * options['residual_channels'], name = 'enc_conv1d_2_layer{}'.format(layer_no))
	# Residual connection.
	return input_ + conv2