How to use the dnn.DropoutNet function in dnn

To help you get started, we’ve selected a few dnn examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github syhw / DL4H / dnn.py View on Github external
def new_dnn(dropout=False):
            # Factory for the network used in this experiment: builds either a
            # dropout-regularized net or a "simple" regularized net depending
            # on the `dropout` flag.
            # NOTE(review): `numpy_rng`, `n_features` and `n_outs` are free
            # variables resolved from the enclosing scope -- confirm they are
            # defined before this function is called.
            if dropout:
                print("Dropout DNN")
                # 4 ReLU hidden layers of 2000 units, topped by a logistic
                # regression output layer.  The first dropout rate (0.2)
                # applies to the input, the 0.5 rates to the hidden layers.
                return DropoutNet(numpy_rng=numpy_rng, n_ins=n_features,
                    # Alternative (smaller) architecture kept for reference:
                    #layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
                    #layers_sizes=[1000, 1000, 1000],
                    #dropout_rates=[0., 0.5, 0.5, 0.5],
                    layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
                    layers_sizes=[2000, 2000, 2000, 2000],
                    dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
                    n_outs=n_outs,
                    debugprint=0)
            else:
                print("Simple (regularized) DNN")
                # Same constructor pattern without dropout; several alternative
                # layer stacks are kept commented out below for experimenting.
                return RegularizedNet(numpy_rng=numpy_rng, n_ins=n_features,
                    #layers_types=[LogisticRegression],
                    #layers_sizes=[],
                    #layers_types=[SVM],
                    #layers_sizes=[],
                    #layers_types=[BatchNormalizer, ReLU, BatchNormalizer, ReLU, BatchNormalizer, ReLU, BatchNormalizer, LogisticRegression],
github syhw / DL4H / dnn.py View on Github external
def __init__(self, numpy_rng, theano_rng=None,
                 n_ins=40*3,
                 layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
                 layers_sizes=[4000, 4000, 4000, 4000],
                 dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
                 n_outs=62 * 3,
                 rho=0.98, eps=1.E-6,
                 debugprint=False):
        """
        A dropout-regularized neural net.

        :param numpy_rng: numpy random state, used to sample dropout masks.
        :param theano_rng: optional Theano RNG, forwarded to the parent class.
        :param n_ins: dimensionality of the input.
        :param layers_types: one layer class per layer (last is the output).
        :param layers_sizes: hidden-layer widths (one fewer than layers_types).
        :param dropout_rates: dropout probability for the input (first entry)
            and for each hidden layer's output; the output layer is never
            dropped (see the loop below).
        :param n_outs: dimensionality of the output.
        :param rho: hyper-parameter forwarded to the parent class.
        :param eps: hyper-parameter forwarded to the parent class.
        :param debugprint: verbosity flag forwarded to the parent class.

        NOTE(review): the list-typed default arguments are shared across
        calls (mutable-default pitfall); safe only if never mutated.
        """
        super(DropoutNet, self).__init__(numpy_rng, theano_rng, n_ins,
                layers_types, layers_sizes, n_outs, rho, eps, debugprint)

        self.dropout_rates = dropout_rates
        # Apply the first dropout rate directly to the raw input self.x.
        dropout_layer_input = dropout(numpy_rng, self.x, p=dropout_rates[0])
        self.dropout_layers = []

        # Walk the already-built parent layers in lockstep with the dropout
        # rates, shifted by one so rates[1:] apply to layer outputs; the
        # appended 0 disables dropout on the final (output) layer.
        for layer, layer_type, n_in, n_out, dr in zip(self.layers,
                layers_types, self.layers_ins, self.layers_outs,
                dropout_rates[1:] + [0]):  # !!! we do not dropout anything
                                           # from the last layer !!!
            if dr:
                # Build a twin layer sharing the parent layer's parameters,
                # rescaled by 1/(1-dr) -- presumably to keep the expected
                # activation magnitude comparable under dropout (inverted-
                # dropout style); TODO confirm against the parent class.
                this_layer = layer_type(rng=numpy_rng,
                        input=dropout_layer_input, n_in=n_in, n_out=n_out,
                        W=layer.W * 1. / (1. - dr),
                        b=layer.b * 1. / (1. - dr))
                # N.B. dropout with dr==1 does not drop anything!!
github syhw / DL4H / dnn.py View on Github external
#ax4.plot(dnn._updates, label=method) TODO
            # Test error is a single scalar per run; repeat it over 10 points
            # so it renders as a flat reference line next to the other curves.
            ax4.plot([test_error for _ in range(10)], label=method)
        # Label each subplot: cost and train/dev/test error versus epoch.
        ax1.set_xlabel('epoch')
        ax1.set_ylabel('cost (log10)')
        ax2.set_xlabel('epoch')
        ax2.set_ylabel('train error')
        ax3.set_xlabel('epoch')
        ax3.set_ylabel('dev error')
        ax4.set_ylabel('test error')
        plt.legend()
        plt.tight_layout()
        # Persist the figure named after the experiment: training_<name>.png
        plt.savefig('training_' + name + '.png')


if __name__ == "__main__":
    # Monkey-patch fit()/score() helpers onto both network classes so they
    # can be trained/evaluated with a common interface below.
    add_fit_and_score(DropoutNet)
    add_fit_and_score(RegularizedNet)

    def nudge_dataset(X, Y):
        """
        This produces a dataset 5 times bigger than the original one,
        by moving the 8x8 images in X around by 1px to left, right, down, up
        """
        # Local import: scipy is only needed when running this script entry.
        from scipy.ndimage import convolve
        # One 3x3 kernel per shift direction; convolving with each kernel
        # translates the image by one pixel (original + 4 shifted copies).
        direction_vectors = [
            [[0, 1, 0],
             [0, 0, 0],
             [0, 0, 0]],
            [[0, 0, 0],
             [1, 0, 0],
             [0, 0, 0]],
            [[0, 0, 0],
github syhw / DL4H / dnn.py View on Github external
def __repr__(self):
    """Extend the parent class's representation with this net's dropout rates."""
    parts = [super(DropoutNet, self).__repr__(),
             "dropout rates: " + str(self.dropout_rates)]
    return "\n".join(parts)