How to use the dnn.RegularizedNet function in dnn

To help you get started, we've selected a few dnn examples based on popular ways the library is used in public projects.


From syhw/DL4H/dnn.py on GitHub:
def new_dnn(dropout=False):
    # numpy_rng, n_features, n_outs and L2_LAMBDA are bound in the
    # enclosing scope of the original script.
    if dropout:
        print("Dropout DNN")
        return DropoutNet(numpy_rng=numpy_rng, n_ins=n_features,
            #layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
            #layers_sizes=[1000, 1000, 1000],
            #dropout_rates=[0., 0.5, 0.5, 0.5],
            layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
            layers_sizes=[2000, 2000, 2000, 2000],
            # one rate per layer: 0.2 on the input, 0.5 on each hidden layer
            dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
            n_outs=n_outs,
            debugprint=0)
    else:
        print("Simple (regularized) DNN")
        return RegularizedNet(numpy_rng=numpy_rng, n_ins=n_features,
            # alternative architectures tried by the author:
            #layers_types=[LogisticRegression],
            #layers_sizes=[],
            #layers_types=[SVM],
            #layers_sizes=[],
            #layers_types=[BatchNormalizer, ReLU, BatchNormalizer, ReLU, BatchNormalizer, ReLU, BatchNormalizer, LogisticRegression],
            #layers_sizes=[784, 1000, 1000, 1000, 1000, 1000, 1000],
            layers_types=[BatchNormalizer, ReLU, BatchNormalizer, LogisticRegression],
            layers_sizes=[784, 1000, 1000],
            #layers_types=[ReLU, LogisticRegression],
            #layers_sizes=[1000],
            #layers_types=[ReLU, LogisticRegression],
            #layers_sizes=[200],
            n_outs=n_outs,
            L1_reg=0.,
            L2_reg=L2_LAMBDA,
            debugprint=1)
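
The snippet above closes over names defined elsewhere in the script (numpy_rng, n_features, n_outs, L2_LAMBDA). Here is a minimal sketch of how they might be bound before calling new_dnn; the concrete values are illustrative assumptions, not taken from the source:

import numpy
numpy_rng = numpy.random.RandomState(123)  # illustrative seed
n_features = 784       # e.g. flattened 28x28 inputs (assumption)
n_outs = 10            # e.g. 10 output classes (assumption)
L2_LAMBDA = 1. / 100   # hypothetical L2 penalty strength

dnn = new_dnn(dropout=False)  # takes the RegularizedNet branch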
From syhw/DL4H/dnn.py on GitHub:
def __init__(self, numpy_rng, theano_rng=None,
             n_ins=100,
             layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
             layers_sizes=[1024, 1024, 1024],
             n_outs=2,
             rho=0.95, eps=1.E-6,
             L1_reg=0.1,
             L2_reg=0.1,
             debugprint=False):
    """
    A deep neural net with possible L1 and/or L2 regularization.
    """
    # `shared` and `T` come from Theano imports at the top of dnn.py
    # (from theano import shared; import theano.tensor as T).
    super(RegularizedNet, self).__init__(numpy_rng, theano_rng, n_ins,
            layers_types, layers_sizes, n_outs, rho, eps, debugprint)

    self.L1_reg = L1_reg
    self.L2_reg = L2_reg
    # L1 penalty: sum of absolute values of every parameter.
    L1 = shared(0.)
    for param in self.params:
        L1 += T.sum(abs(param))
    if L1_reg > 0.:
        self.cost = self.cost + L1_reg * L1
    # L2 penalty: sum of squares of every parameter.
    L2 = shared(0.)
    for param in self.params:
        L2 += T.sum(param ** 2)
    if L2_reg > 0.:
        self.cost = self.cost + L2_reg * L2
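
In effect, the constructor augments the inherited training cost with the standard penalties, cost + L1_reg * sum(|param|) + L2_reg * sum(param**2), each applied only when its coefficient is positive. A minimal instantiation sketch (the layer stack and hyperparameters below are illustrative, not from the source):

import numpy
numpy_rng = numpy.random.RandomState(42)  # illustrative seed
net = RegularizedNet(numpy_rng=numpy_rng,
                     n_ins=64,                  # e.g. 8x8 digits, flattened
                     layers_types=[ReLU, LogisticRegression],
                     layers_sizes=[200],
                     n_outs=10,
                     L1_reg=0.,                 # keep only the L2 penalty
                     L2_reg=1. / 100)           # hypothetical strength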
From syhw/DL4H/dnn.py on GitHub:
# Excerpt from the end of a plotting routine in dnn.py: ax1-ax4 are the
# four subplot axes, and `method` names the training method being plotted.
ax4.plot([test_error for _ in range(10)], label=method)
ax1.set_xlabel('epoch')
ax1.set_ylabel('cost (log10)')
ax2.set_xlabel('epoch')
ax2.set_ylabel('train error')
ax3.set_xlabel('epoch')
ax3.set_ylabel('dev error')
ax4.set_ylabel('test error')
plt.legend()
plt.tight_layout()
plt.savefig('training_' + name + '.png')
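
The excerpt starts mid-function, so ax1-ax4 and the error lists come from earlier in the routine. A minimal sketch of how such a figure might be set up (the 2x2 subplot layout is an assumption, not taken from the source):

import matplotlib.pyplot as plt
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10, 8))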


if __name__ == "__main__":
    add_fit_and_score(DropoutNet)
    add_fit_and_score(RegularizedNet)

    def nudge_dataset(X, Y):
        """
        This produces a dataset 5 times bigger than the original one,
        by moving the 8x8 images in X around by 1px to the left, right,
        down, and up.
        """
        from scipy.ndimage import convolve
        direction_vectors = [
            [[0, 1, 0],
             [0, 0, 0],
             [0, 0, 0]],
            [[0, 0, 0],
             [1, 0, 0],
             [0, 0, 0]],
            [[0, 0, 0],
             [0, 0, 1],
             [0, 0, 0]],
            [[0, 0, 0],
             [0, 0, 0],
             [0, 1, 0]]]
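
The excerpt is truncated here. For reference, the shifting itself is typically done by convolving each image with the one-hot kernels above, as in scikit-learn's well-known nudge_dataset recipe; a sketch under that assumption (not necessarily the author's exact continuation):

import numpy as np
from scipy.ndimage import convolve

def shift(x, w):
    # Convolving a flattened 8x8 image with a one-hot 3x3 kernel
    # moves it by one pixel in the kernel's direction.
    return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()

def nudge(X, Y, direction_vectors):
    # The original images plus four shifted copies: five times the
    # data, as the docstring above promises.
    X_big = np.concatenate([X] + [np.apply_along_axis(shift, 1, X, v)
                                  for v in direction_vectors])
    Y_big = np.concatenate([Y for _ in range(5)], axis=0)
    return X_big, Y_big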