How to use the ops.batch_norm function in ops

To help you get started, we've selected a few ops.batch_norm examples, based on popular ways it is used in public projects.

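The projects below define ops.batch_norm (or a batch_norm helper in their own ops module) themselves rather than importing it from a shared library, so the exact signature varies from repository to repository. As a rough orientation for the DCGAN-derived examples, here is a minimal sketch of the object-style wrapper they rely on; it assumes TensorFlow 1.x, and the epsilon/momentum defaults are illustrative rather than taken from any particular repository.

import tensorflow as tf

class batch_norm(object):
    """Object-style wrapper: remembers a scope name and hyper-parameters so a
    layer can be declared once (e.g. in __init__) and applied later."""

    def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        self.epsilon = epsilon
        self.momentum = momentum
        self.name = name

    def __call__(self, x, train=True):
        # updates_collections=None makes the moving-average updates run inline,
        # so no extra update ops have to be collected at training time.
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            updates_collections=None,
                                            scope=self.name)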

github dougalsutherland / opt-mmd / gan / model_tmmd.py
        self.gf_dim = gf_dim
        self.df_dim = df_dim

        self.gfc_dim = gfc_dim
        self.dfc_dim = dfc_dim

        self.c_dim = c_dim

        # Batch normalization: deals with poor initialization and helps gradient flow
        self.d_bn1 = batch_norm(name='d_bn1')
        self.d_bn2 = batch_norm(name='d_bn2')
        self.d_bn3 = batch_norm(name='d_bn3')

        self.g_bn0 = batch_norm(name='g_bn0')
        self.g_bn1 = batch_norm(name='g_bn1')
        self.g_bn2 = batch_norm(name='g_bn2')
        self.g_bn3 = batch_norm(name='g_bn3')

        self.dataset_name = dataset_name
        self.build_model()
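The excerpt above only constructs the layers; the actual calls happen later in the discriminator and generator, between a convolution and its activation. A hedged usage sketch, assuming the wrapper sketched at the top (the tensor shape and scope name are illustrative):

import tensorflow as tf

bn = batch_norm(name='d_bn1')                          # one named object per layer
x = tf.placeholder(tf.float32, [None, 16, 16, 64])     # e.g. a conv feature map
h = tf.nn.relu(bn(x, train=True))                      # conv -> batch norm -> activation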

github lilianweng / unified-gan-tensorflow / model.py
        image_dims = [self.input_height, self.input_width, self.c_dim]

        self.inputs = tf.placeholder(
            tf.float32, [self.batch_size] + image_dims, name='real_images')

        inputs = self.inputs

        ##############################
        # Define batch normalization layers for constructing D and G networks.
        # Batch normalization: deals with poor initialization and helps gradient flow
        self.d_bn1 = batch_norm(name='d_bn1')
        self.d_bn2 = batch_norm(name='d_bn2')
        if not self.y_dim:
            self.d_bn3 = batch_norm(name='d_bn3')

        self.g_bn0 = batch_norm(name='g_bn0')
        self.g_bn1 = batch_norm(name='g_bn1')
        self.g_bn2 = batch_norm(name='g_bn2')
        if not self.y_dim:
            self.g_bn3 = batch_norm(name='g_bn3')

        ##############################
        # Define the model structure

        self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')
        self.z_sum = tf.summary.histogram("z", self.z)

        self.G = self.generator(self.z, self.y)
        self.sampler = self.sampler(self.z, self.y)
        self.D, self.D_logits = self.discriminator(inputs, self.y, reuse=False)
        self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)
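Note that the discriminator is built twice, once on the real inputs with reuse=False and once on G with reuse=True, which is why the d_bn* objects live on self: both calls must share the same normalization variables. A hedged, self-contained sketch of that sharing, again assuming the wrapper sketched at the top (names and shapes are illustrative):

import tensorflow as tf

bn = batch_norm(name='d_bn1')
real = tf.random_normal([8, 16, 16, 64])
fake = tf.random_normal([8, 16, 16, 64])

with tf.variable_scope('discriminator'):
    d_real = tf.nn.relu(bn(real))                  # creates d_bn1/beta, gamma, ...
with tf.variable_scope('discriminator', reuse=True):
    d_fake = tf.nn.relu(bn(fake))                  # reuses the same variables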

github tolstikhin / adagan / vae.py
    def discriminator(self, opts, input_, is_training,
                      prefix='DISCRIMINATOR', reuse=False):
        """Encoder function, suitable for simple toy experiments.

        """
        num_filters = opts['d_num_filters']

        with tf.variable_scope(prefix, reuse=reuse):
            h0 = ops.conv2d(opts, input_, num_filters / 8, scope='h0_conv')
            h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
            h0 = tf.nn.relu(h0)
            h1 = ops.conv2d(opts, h0, num_filters / 4, scope='h1_conv')
            h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
            h1 = tf.nn.relu(h1)
            h2 = ops.conv2d(opts, h1, num_filters / 2, scope='h2_conv')
            h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
            h2 = tf.nn.relu(h2)
            h3 = ops.conv2d(opts, h2, num_filters, scope='h3_conv')
            h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
            h3 = tf.nn.relu(h3)
            # Already has NaNs!!
            latent_mean = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin')
            log_latent_sigmas = ops.linear(opts, h3, opts['latent_space_dim'], scope='h3_lin_sigma')

        return latent_mean, log_latent_sigmas
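In the adagan and wae projects, ops.batch_norm is a plain function rather than an object: it takes the global opts dict, the input tensor, an is_training flag, a reuse flag and a scope name. A hedged sketch of such a functional wrapper (the real helper may differ, and the opts keys shown are assumptions):

import tensorflow as tf

def batch_norm(opts, _input, is_train, reuse, scope):
    # Functional-style wrapper: hyper-parameters come from the opts dict and
    # variable reuse is passed through explicitly.
    return tf.contrib.layers.batch_norm(
        _input,
        decay=opts.get('batch_norm_decay', 0.9),   # assumed config key
        epsilon=opts.get('batch_norm_eps', 1e-5),  # assumed config key
        center=True,
        scale=True,
        is_training=is_train,
        reuse=True if reuse else None,  # variable_scope expects True or None
        updates_collections=None,
        scope=scope)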

github tolstikhin / adagan / gan.py
            height = output_shape[0] / 16
            width = output_shape[1] / 16
            h0 = ops.linear(opts, noise, num_filters * height * width,
                            scope='h0_lin')
            h0 = tf.reshape(h0, [-1, height, width, num_filters])
            h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
            h0 = tf.nn.relu(h0)
            _out_shape = [dim1, height * 2, width * 2, num_filters / 2]
            # for 128 x 128 does 8 x 8 --> 16 x 16
            h1 = ops.deconv2d(opts, h0, _out_shape, scope='h1_deconv')
            h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
            h1 = tf.nn.relu(h1)
            _out_shape = [dim1, height * 4, width * 4, num_filters / 4]
            # for 128 x 128 does 16 x 16 --> 32 x 32 
            h2 = ops.deconv2d(opts, h1, _out_shape, scope='h2_deconv')
            h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
            h2 = tf.nn.relu(h2)
            _out_shape = [dim1, height * 8, width * 8, num_filters / 8]
            # for 128 x 128 does 32 x 32 --> 64 x 64 
            h3 = ops.deconv2d(opts, h2, _out_shape, scope='h3_deconv')
            h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
            h3 = tf.nn.relu(h3)
            _out_shape = [dim1, height * 16, width * 16, num_filters / 16]
            # for 128 x 128 does 64 x 64 --> 128 x 128 
            h4 = ops.deconv2d(opts, h3, _out_shape, scope='h4_deconv')
            h4 = ops.batch_norm(opts, h4, is_training, reuse, scope='bn_layer5')
            h4 = tf.nn.relu(h4)
            _out_shape = [dim1] + list(output_shape)
            # data_shape[0] x data_shape[1] x ? -> data_shape
            h5 = ops.deconv2d(opts, h4, _out_shape,
                              d_h=1, d_w=1, scope='h5_deconv')
            h5 = ops.batch_norm(opts, h5, is_training, reuse, scope='bn_layer6')
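The is_training argument threaded through every ops.batch_norm call above is what switches the layers between batch statistics (training) and moving averages (sampling). A hedged sketch of how that flag is typically driven at session time, assuming the functional wrapper sketched above (placeholder name, shapes and opts keys are illustrative):

import tensorflow as tf

opts = {'batch_norm_decay': 0.9, 'batch_norm_eps': 1e-5}   # assumed keys
is_training_ph = tf.placeholder(tf.bool, name='is_training_ph')
x = tf.random_normal([16, 8, 8, 32])
h = tf.nn.relu(batch_norm(opts, x, is_training_ph, reuse=False, scope='bn_demo'))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(h, feed_dict={is_training_ph: True})    # training step
    sess.run(h, feed_dict={is_training_ph: False})   # sampling / evaluation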

github tolstikhin / wae / models.py
        layer_x = ops.deconv2d(
            opts, layer_x, [batch_size, height, width, channels],
            d_h=stride, d_w=stride, scope='h%d_deconv' % i,
            conv_filters_dim=kernel, padding='VALID')
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training,
                                     reuse, scope='h%d_bn' % i)
        layer_x = ops.lrelu(layer_x, 0.1)
    assert height == data_height
    assert width == data_width

    # Then two 1x1 convolutions.
    layer_x = ops.conv2d(opts, layer_x, num_units / 8, d_h=1, d_w=1,
                         scope='conv2d_1x1', conv_filters_dim=1)
    if opts['batch_norm']:
        layer_x = ops.batch_norm(opts, layer_x,
                                 is_training, reuse, scope='hfinal_bn')
    layer_x = ops.lrelu(layer_x, 0.1)
    layer_x = ops.conv2d(opts, layer_x, data_channels, d_h=1, d_w=1,
                         scope='conv2d_1x1_2', conv_filters_dim=1)
    if opts['input_normalize_sym']:
        return tf.nn.tanh(layer_x), layer_x
    else:
        return tf.nn.sigmoid(layer_x), layer_x
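The wae decoder above keys its behavior off a configuration dict: batch normalization is optional, and the output nonlinearity depends on how the input data was normalized. A hedged example of the relevant entries (values are illustrative only):

opts = {
    'batch_norm': True,            # wrap each deconv/conv output in ops.batch_norm
    'input_normalize_sym': False,  # False -> sigmoid output; True -> tanh for data scaled to [-1, 1]
}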

github lilianweng / unified-gan-tensorflow / model.py
        if self.crop:
            image_dims = [self.output_height, self.output_width, self.c_dim]
        else:
            image_dims = [self.input_height, self.input_width, self.c_dim]

        self.inputs = tf.placeholder(
            tf.float32, [self.batch_size] + image_dims, name='real_images')

        inputs = self.inputs

        ##############################
        # Define batch normalization layers for constructing D and G networks.
        # Batch normalization: deals with poor initialization and helps gradient flow
        self.d_bn1 = batch_norm(name='d_bn1')
        self.d_bn2 = batch_norm(name='d_bn2')
        if not self.y_dim:
            self.d_bn3 = batch_norm(name='d_bn3')

        self.g_bn0 = batch_norm(name='g_bn0')
        self.g_bn1 = batch_norm(name='g_bn1')
        self.g_bn2 = batch_norm(name='g_bn2')
        if not self.y_dim:
            self.g_bn3 = batch_norm(name='g_bn3')

        ##############################
        # Define the model structure

        self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')
        self.z_sum = tf.summary.histogram("z", self.z)

        self.G = self.generator(self.z, self.y)

github lilianweng / unified-gan-tensorflow / model.py
        inputs = self.inputs

        ##############################
        # Define batch normalization layers for constructing D and G networks.
        # Batch normalization: deals with poor initialization and helps gradient flow
        self.d_bn1 = batch_norm(name='d_bn1')
        self.d_bn2 = batch_norm(name='d_bn2')
        if not self.y_dim:
            self.d_bn3 = batch_norm(name='d_bn3')

        self.g_bn0 = batch_norm(name='g_bn0')
        self.g_bn1 = batch_norm(name='g_bn1')
        self.g_bn2 = batch_norm(name='g_bn2')
        if not self.y_dim:
            self.g_bn3 = batch_norm(name='g_bn3')

        ##############################
        # Define the model structure

        self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')
        self.z_sum = tf.summary.histogram("z", self.z)

        self.G = self.generator(self.z, self.y)
        self.sampler = self.sampler(self.z, self.y)
        self.D, self.D_logits = self.discriminator(inputs, self.y, reuse=False)
        self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)

        self.d_sum = tf.summary.histogram("d", self.D)
        self.d__sum = tf.summary.histogram("d_", self.D_)
        self.G_sum = tf.summary.image("G", self.G, max_outputs=4)
        self.inputs_sum = tf.summary.image("inputs", self.inputs, max_outputs=4)

github dougalsutherland / opt-mmd / gan / model_mmd.py
        self.output_size = output_size
        self.sample_dir = sample_dir
        self.log_dir = log_dir
        self.checkpoint_dir = checkpoint_dir
        self.z_dim = z_dim

        self.gf_dim = gf_dim
        self.df_dim = df_dim

        self.gfc_dim = gfc_dim
        self.dfc_dim = dfc_dim

        self.c_dim = c_dim

        # Batch normalization: deals with poor initialization and helps gradient flow
        self.d_bn1 = batch_norm(name='d_bn1')
        self.d_bn2 = batch_norm(name='d_bn2')
        self.d_bn3 = batch_norm(name='d_bn3')

        self.g_bn0 = batch_norm(name='g_bn0')
        self.g_bn1 = batch_norm(name='g_bn1')
        self.g_bn2 = batch_norm(name='g_bn2')
        self.g_bn3 = batch_norm(name='g_bn3')

        self.dataset_name = dataset_name
        self.build_model()

github tolstikhin / adagan / pot.py
        """ Decoder actually.

        """

        output_shape = self._data.data_shape
        num_units = opts['g_num_filters']

        with tf.variable_scope("GENERATOR", reuse=reuse):
            # if not opts['convolutions']:
            if opts['g_arch'] == 'mlp':
                layer_x = noise
                for i in range(opts['g_num_layers']):
                    layer_x = ops.linear(opts, layer_x, num_units, 'h%d_lin' % i)
                    layer_x = tf.nn.relu(layer_x)
                    if opts['batch_norm']:
                        layer_x = ops.batch_norm(
                            opts, layer_x, is_training, reuse, scope='bn%d' % i)
                out = ops.linear(opts, layer_x, np.prod(output_shape), 'h%d_lin' % (i + 1))
                out = tf.reshape(out, [-1] + list(output_shape))
                if opts['input_normalize_sym']:
                    return tf.nn.tanh(out)
                else:
                    return tf.nn.sigmoid(out)
            elif opts['g_arch'] in ['dcgan', 'dcgan_mod']:
                return self.dcgan_like_arch(opts, noise, is_training, reuse, keep_prob)
            elif opts['g_arch'] == 'conv_up_res':
                return self.conv_up_res(opts, noise, is_training, reuse, keep_prob)
            elif opts['g_arch'] == 'ali':
                return self.ali_deconv(opts, noise, is_training, reuse, keep_prob)
            elif opts['g_arch'] == 'began':
                return self.began_dec(opts, noise, is_training, reuse, keep_prob)
            else:

github pshevche / drl-frameworks / park / envs / tf_placement / models / inception / small_cnn.py
            N = len(devices)
            if device_placement == 'random':
                dev_id = random.randint(0, N-1)
            elif device_placement == 'alternate':
                dev_id = i % N
            else:
                # device_placement == 'expert' or otherwise
                dev_id = 0
            return dev_id

        print('BS: ' + str(bs))
        import pdb; pdb.set_trace()
        if inputs is None:
            inputs = tf.ones((bs, 299, 299, 3))

        with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                            is_training=trainable):
            with tf.device(devices[get_dev_id(0)]) if devices else ExitStack() as gs:
                with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                        stride=1, padding='VALID'):
                    # 299 x 299 x 3
                    ret = ops.conv2d(inputs, num_filters, [3, 3], stride=2, scope='conv0')

        return tf.get_default_graph(), ret
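The last example uses the TF-Slim-style API from the old Inception codebase, where scopes.arg_scope sets default keyword arguments (here is_training) for every listed op at once. A hedged sketch of the same idea using tf.contrib.framework.arg_scope and tf.contrib.layers, a comparable mechanism from TensorFlow 1.x (layer sizes and scope names are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

x = tf.random_normal([4, 32, 32, 3])
with arg_scope([layers.batch_norm, layers.dropout], is_training=False):
    net = layers.conv2d(x, 16, [3, 3], scope='conv0')
    net = layers.batch_norm(net, scope='bn0')                 # picks up is_training=False
    net = layers.dropout(net, keep_prob=0.8, scope='drop0')   # picks up is_training=False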