How to use the dnn.layers.BatchNormalizationLayer class in dnn

To help you get started, we’ve selected a few dnn examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github satopirka / deep-learning-theano / example / cnn.py View on Github external
self.index = T.lscalar()  # minibatch index symbol; not used in this fragment — presumably consumed by train/eval functions defined elsewhere
        self.x = T.tensor4('x')          # input images as a 4D tensor (batch, channel, height, width — per image_shape below)
        self.y = T.ivector('y')          # integer class labels for the batch
        self.train = T.iscalar('train')  # train/test mode flag, passed to dropout() below

        # Input is taken as-is; the reshape to (batchsize, 1, 28, 28) is left
        # commented out, so self.x is assumed to already have that shape
        # (consistent with image_shape on layer0). TODO confirm at the caller.
        # layer0_input = self.x.reshape((self.batchsize, 1, 28, 28))
        layer0_input = self.x

        # Conv layer 0: 20 filters of 5x5 over a 1-channel 28x28 input.
        # With 5x5 valid convolution this yields 24x24 feature maps
        # (matches the shape given to the batch-norm layer below).
        layer0 = Convolutional2DLayer(
            self.rng,
            layer0_input,
            filter_shape=(20, 1, 5, 5),
            image_shape=(self.batchsize, 1, 28, 28)
        )

        # Batch-normalize conv0's output; shape is spelled out explicitly.
        layer1 = BatchNormalizationLayer(layer0.output, shape=(self.batchsize, 20, 24, 24))

        # ReLU then 2x2 max-pooling: 24x24 -> 12x12 (matches image_shape on layer2).
        layer2_input = layer1.output.reshape((self.batchsize, 20, 24, 24))
        layer2_input = relu(layer2_input)
        layer2_input = MaxPooling2DLayer(layer2_input, poolsize=(2, 2)).output

        # Conv layer 2: 50 filters of 5x5 over the 20-channel 12x12 maps -> 8x8 maps.
        layer2 = Convolutional2DLayer(
            self.rng,
            layer2_input,
            filter_shape=(50, 20, 5, 5),
            image_shape=(self.batchsize, 20, 12, 12)
        )

        # Batch-normalize conv2's output (50 maps of 8x8).
        layer3 = BatchNormalizationLayer(layer2.output, shape=(self.batchsize, 50, 8, 8))

        # ReLU then 2x2 max-pooling: 8x8 -> 4x4 (fragment is cut here;
        # the flatten/FC layers continue beyond this excerpt).
        layer4_input = relu(layer3.output)
        layer4_input = MaxPooling2DLayer(layer4_input, poolsize=(2, 2)).output
github satopirka / deep-learning-theano / example / cnn.py View on Github external
layer3 = BatchNormalizationLayer(layer2.output, shape=(self.batchsize, 50, 8, 8))  # batch-norm over conv2's 50 maps of 8x8

        # ReLU, then 2x2 max-pooling (8x8 -> 4x4), then flatten to (batchsize, 50*4*4).
        layer4_input = relu(layer3.output)
        layer4_input = MaxPooling2DLayer(layer4_input, poolsize=(2, 2)).output
        layer4_input = layer4_input.reshape((self.batchsize, 50*4*4))

        # Fully connected 800 -> 500; dropout is gated by the `train` flag
        # so it can behave differently at train vs. test time.
        layer4 = FullyConnectedLayer(
            self.rng,
            dropout(self.rng, layer4_input, self.train),
            n_input=50*4*4,
            n_output=500
        )

        layer5_input = layer4.output

        # Batch-norm over the 500-unit fully connected output (2D shape this time).
        layer5 = BatchNormalizationLayer(layer5_input, shape=(self.batchsize, 500))
        layer6_input = relu(layer5.output)
        # Final classifier layer: 500 -> n_output class scores.
        layer6 = FullyConnectedLayer(
            self.rng,
            layer6_input,
            n_input=500,
            n_output=n_output
        )

        # Loss and accuracy come from the project's Metric helper;
        # the objective is negative log-likelihood against self.y.
        self.metric = Metric(layer6.output, self.y)
        self.loss = self.metric.negative_log_likelihood()
        self.accuracy = self.metric.accuracy()
        # Collect trainable parameters, layer 6 down to layer 3. The fragment
        # is cut here — presumably layers 2..0 are appended on the lines that
        # follow in the full file; verify against the original source.
        params = []
        params.extend(layer6.params)
        params.extend(layer5.params)
        params.extend(layer4.params)
        params.extend(layer3.params)
github satopirka / deep-learning-theano / example / cnn.py View on Github external
)  # closes a constructor call (presumably the layer0 Convolutional2DLayer) whose opening lines fall outside this excerpt

        # Batch-normalize conv0's output: 20 feature maps of 24x24.
        layer1 = BatchNormalizationLayer(layer0.output, shape=(self.batchsize, 20, 24, 24))

        # ReLU then 2x2 max-pooling: 24x24 -> 12x12 (matches image_shape on layer2).
        layer2_input = layer1.output.reshape((self.batchsize, 20, 24, 24))
        layer2_input = relu(layer2_input)
        layer2_input = MaxPooling2DLayer(layer2_input, poolsize=(2, 2)).output

        # Conv layer 2: 50 filters of 5x5 over the 20-channel 12x12 maps -> 8x8 maps.
        layer2 = Convolutional2DLayer(
            self.rng,
            layer2_input,
            filter_shape=(50, 20, 5, 5),
            image_shape=(self.batchsize, 20, 12, 12)
        )

        # Batch-normalize conv2's output (50 maps of 8x8).
        layer3 = BatchNormalizationLayer(layer2.output, shape=(self.batchsize, 50, 8, 8))

        # ReLU, then 2x2 max-pooling (8x8 -> 4x4), then flatten to (batchsize, 50*4*4).
        layer4_input = relu(layer3.output)
        layer4_input = MaxPooling2DLayer(layer4_input, poolsize=(2, 2)).output
        layer4_input = layer4_input.reshape((self.batchsize, 50*4*4))

        # Fully connected 800 -> 500 with dropout gated by the `train` flag.
        layer4 = FullyConnectedLayer(
            self.rng,
            dropout(self.rng, layer4_input, self.train),
            n_input=50*4*4,
            n_output=500
        )

        layer5_input = layer4.output

        # Batch-norm over the 500-unit FC output, then ReLU. The fragment is
        # cut here; the final classifier layer continues beyond this excerpt.
        layer5 = BatchNormalizationLayer(layer5_input, shape=(self.batchsize, 500))
        layer6_input = relu(layer5.output)