How to use the dnn.layers.FullyConnectedLayer function in dnn

To help you get started, we’ve selected a few dnn examples based on popular ways dnn.layers.FullyConnectedLayer is used in public projects.

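All of the snippets below come from satopirka/deep-learning-theano, which builds Theano networks out of dnn layers. Judging from those call sites, FullyConnectedLayer takes a NumPy RandomState, a symbolic input, the layer's input and output sizes, an optional activation, and optional initial W and b. Here is a minimal sketch of a standalone layer; the import path for relu is inferred from the examples, not from dnn's documentation:

import numpy as np
import theano.tensor as T

from dnn.layers import FullyConnectedLayer
from dnn.functions import relu  # import path for relu is an assumption

rng = np.random.RandomState(1234)
x = T.matrix('x')

# One hidden layer mapping 784 inputs to 500 units.
hidden = FullyConnectedLayer(
    rng,
    input=x,
    n_input=784,
    n_output=500,
    activation=relu
)
h = hidden.output       # symbolic output of the layer
params = hidden.params  # parameters to hand to an optimizer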

github satopirka / deep-learning-theano / example / mlp.py
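mlp.py builds its whole stack in one loop, choosing the dimensions, dropout wiring, and activation for each position before constructing the layer. The excerpt is cut off mid-branch, so the loop head and the input-layer branch are reconstructed here from the elif/else conditions: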
        for i in range(self.n_layer + 1):  # loop head reconstructed from the branch conditions below
            if i == 0:
                """for input layer"""
                layer_n_input = self.n_input
                layer_n_output = self.n_hidden[0]
                layer_input = dropout(self.rng, self.x, self.train, p=0.1)
                activation = relu
            elif i != self.n_layer:
                """for hidden layers"""
                layer_n_input = self.n_hidden[i-1]
                layer_n_output = self.n_hidden[i]
                layer_input = dropout(self.rng, self.layers[-1].output, self.train)
                activation = relu
            else:
                """for output layer"""
                layer_n_input = self.n_hidden[-1]
                layer_n_output = self.n_output
                layer_input = self.layers[-1].output
                activation = None

            layer = FullyConnectedLayer(
                self.rng,
                input=layer_input,
                n_input=layer_n_input,
                n_output=layer_n_output,
                activation=activation
            )
            self.layers.append(layer)
            self.params.extend(layer.params)

        """regularization"""
        # self.L1 = abs(self.h1.W).sum() + abs(self.pred_y.W).sum()
        # self.L2 = abs(self.h1.W**2).sum() + abs(self.pred_y.W**2).sum()

        """loss accuracy error"""
        self.metric = Metric(self.layers[-1].output, self.y)
        self.loss = self.metric.negative_log_likelihood()  # + L1_reg*self.L1 + L2_reg*self.L2
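The loss and the collected params are everything an optimizer needs. dnn ships its own optimizer classes (Adam appears in the other snippets); as a generic illustration, here is a hand-rolled SGD step over the same attributes, assuming the model exposes x, y, train, loss, and params as above:

import theano
import theano.tensor as T

# model is an instance of the MLP class above; 0.01 is an illustrative learning rate.
grads = T.grad(model.loss, wrt=model.params)
updates = [(p, p - 0.01 * g) for p, g in zip(model.params, grads)]

train_step = theano.function(
    inputs=[model.x, model.y, model.train],  # the 'train' flag toggles dropout
    outputs=model.loss,
    updates=updates
)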
github satopirka / deep-learning-theano / example / sa.py
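sa.py wires two FullyConnectedLayer instances back to back as a sparse autoencoder: a sigmoid encoder, optionally seeded with external W and b, and a sigmoid decoder whose reconstruction error is penalized for non-sparse hidden activity: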
def __init__(self, rng, input=None, n_visible=784, 
        n_hidden=784, sparse_reg=1e-3, optimizer=Adam, W=None, b=None):
        self.rng = rng

        """symbol definition"""
        self.index = T.lscalar()
        self.s_level = T.fscalar()
        if input is None:
            self.x = T.matrix('x')
        else:
            self.x = input

        """network structure definition"""
        """encoder"""
        self.h = FullyConnectedLayer(
            self.rng,
            input=self.x,
            n_input=n_visible,
            n_output=n_hidden,
            activation=sigmoid,
            W=W,
            b=b
        )
        """decoder"""
        self.y = FullyConnectedLayer(
            self.rng,
            input=self.h.output,
            n_input=n_hidden,
            n_output=n_visible,
            activation=sigmoid
        )

        """loss accuracy error"""
        self.metric = Metric(self.y.output, self.x)
        sparsity_penalty = self.sparsity_penalty(self.h.output, sparsity_level=self.s_level, sparse_reg=sparse_reg, n_units=n_hidden)
        self.loss = self.metric.mean_squared_error() + sparsity_penalty

        """parameters (i.e., weights and biases) for whole networks"""
        self.params = self.h.params + self.y.params

        """optimizer for learning parameters"""
github satopirka / deep-learning-theano / example / cnn.py
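cnn.py switches to fully connected layers only at the tail of a convolutional stack: the pooled feature maps are flattened to a 50*4*4 vector, passed through dropout into a 500-unit FullyConnectedLayer, batch-normalized, and mapped to the outputs by a second one: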
        layer2_input = MaxPooling2DLayer(layer2_input, poolsize=(2, 2)).output

        layer2 = Convolutional2DLayer(
            self.rng,
            layer2_input,
            filter_shape=(50, 20, 5, 5),
            image_shape=(self.batchsize, 20, 12, 12)
        )

        layer3 = BatchNormalizationLayer(layer2.output, shape=(self.batchsize, 50, 8, 8))

        layer4_input = relu(layer3.output)
        layer4_input = MaxPooling2DLayer(layer4_input, poolsize=(2, 2)).output
        layer4_input = layer4_input.reshape((self.batchsize, 50*4*4))

        layer4 = FullyConnectedLayer(
            self.rng,
            dropout(self.rng, layer4_input, self.train),
            n_input=50*4*4,
            n_output=500
        )

        layer5_input = layer4.output

        layer5 = BatchNormalizationLayer(layer5_input, shape=(self.batchsize, 500))
        layer6_input = relu(layer5.output)
        layer6 = FullyConnectedLayer(
            self.rng,
            layer6_input,
            n_input=500,
            n_output=n_output
        )

        self.metric = Metric(layer6.output, self.y)
        self.loss = self.metric.negative_log_likelihood()
        self.accuracy = self.metric.accuracy()
        params = []
        params.extend(layer6.params)
        params.extend(layer5.params)
        params.extend(layer4.params)
        params.extend(layer3.params)
        params.extend(layer2.params)
        params.extend(layer1.params)
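Everything upstream of the two fully connected layers contributes parameters too, so the example collects params layer by layer, from layer6 back to layer1, before building the optimizer.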
github satopirka / deep-learning-theano / example / da.py
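da.py is the denoising variant: the encoder's FullyConnectedLayer reads the input after get_corrupted_input has noised it (gated by the train flag), and a sigmoid decoder reconstructs the clean input, with mean squared error as the loss: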
        self.rng = rng

        """symbol definition"""
        self.index = T.lscalar()
        if input is None:
            self.x = T.matrix('x')
        else:
            self.x = input
        if train is None:
            self.train = T.iscalar('train')
        else:
            self.train = train

        """network structure definition"""
        """encoder"""
        self.h = FullyConnectedLayer(
            self.rng,
            input=get_corrupted_input(
                self.rng,
                self.x,
                self.train,
                corruption_level=corruption_level
            ),
            n_input=n_visible,
            n_output=n_hidden,
            activation=relu,
            W=W,
            b=b
        )
        """decoder"""
        self.y = FullyConnectedLayer(
            self.rng,
github satopirka / deep-learning-theano / example / da.py View on Github external
self.h = FullyConnectedLayer(
            self.rng,
            input=get_corrupted_input(
                self.rng,
                self.x,
                self.train,
                corruption_level=corruption_level
            ),
            n_input=n_visible,
            n_output=n_hidden,
            activation=relu,
            W=W,
            b=b
        )
        """decoder"""
        self.y = FullyConnectedLayer(
            self.rng,
            input=self.h.output,
            n_input=n_hidden,
            n_output=n_visible,
            activation=sigmoid
        )

        """loss accuracy error"""
        self.metric = Metric(self.y.output, self.x)
        self.loss = self.metric.mean_squared_error()

        """parameters (i.e., weights and biases) for whole networks"""
        self.params = self.h.params + self.y.params

        """optimizer for learning parameters"""
        self.optimizer = optimizer(params=self.params)
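Both autoencoder examples accept W and b and forward them to the encoder's FullyConnectedLayer, which is what makes tied or pretrained weights possible. Here is a sketch of that hand-off with explicitly created shared variables; the uniform initialization range and the sigmoid import path are illustrative assumptions:

import numpy as np
import theano
import theano.tensor as T

from dnn.layers import FullyConnectedLayer
from dnn.functions import sigmoid  # import path assumed, as above

rng = np.random.RandomState(1234)
x = T.matrix('x')

# Shared parameters created up front so several layers or models can reuse them.
W = theano.shared(
    np.asarray(rng.uniform(low=-0.1, high=0.1, size=(784, 500)),
               dtype=theano.config.floatX),
    name='W', borrow=True
)
b = theano.shared(np.zeros(500, dtype=theano.config.floatX), name='b', borrow=True)

encoder = FullyConnectedLayer(
    rng,
    input=x,
    n_input=784,
    n_output=500,
    activation=sigmoid,
    W=W,
    b=b
)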