How to use the chainer.links.Linear link in Chainer

To help you get started, we’ve selected a few chainer.links.Linear examples, based on popular ways it is used in public projects.

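Before the project snippets, here is a minimal, self-contained sketch of the link itself (the sizes are arbitrary). L.Linear(in_size, out_size) is a fully connected layer computing y = xW^T + b:

import numpy as np
import chainer.links as L

# A fully connected layer mapping 20-dimensional inputs to 30 outputs.
layer = L.Linear(20, 30)

x = np.random.rand(10, 20).astype(np.float32)  # a batch of 10 input vectors
y = layer(x)
print(y.shape)  # (10, 30)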

github chainer / chainer / tests / chainer_tests / link_hooks_tests / test_spectral_normalization.py View on Github external
def setUp(self):
        self.bs, self.in_size, self.out_size = 10, 20, 30
        self.x = numpy.random.normal(
            size=(self.bs, self.in_size)).astype(numpy.float32)
        # Pass in_size=None to defer weight allocation until the first forward pass
        # (lazy initialization).
        in_size = None if self.lazy_init else self.in_size
        self.layer = L.Linear(in_size, self.out_size)
        self.hook = SpectralNormalization(use_gamma=self.use_gamma)
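The lazy path exercised by this test can be observed directly; a minimal sketch (sizes arbitrary) of what omitting in_size does:

import numpy as np
import chainer.links as L

layer = L.Linear(30)          # in_size omitted, so W is not allocated yet
assert layer.W.data is None   # the parameter is uninitialized

x = np.zeros((10, 20), dtype=np.float32)
layer(x)                      # the first call infers in_size=20 from x
assert layer.W.shape == (30, 20)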

github Kaixhin / malmo-challenge / malmopy / model / chainer / qlearning.py View on Github
def _build_model(self):
        initializer = HeUniform()
        in_shape = self.input_shape[0]

        return [L.Convolution2D(in_shape, 64, ksize=4, stride=2,
                                initialW=initializer),
                L.Convolution2D(64, 64, ksize=3, stride=1,
                                initialW=initializer),
                L.Linear(None, 512, initialW=HeUniform(0.1)),
                L.Linear(512, self.output_shape, initialW=HeUniform(0.1))]

github chainer / chainer / examples / mnist / net.py View on Github
def __init__(self, n_in, n_units, n_out):
        super(MnistMLP, self).__init__(
            l1=L.Linear(n_in, n_units),
            l2=L.Linear(n_units, n_units),
            l3=L.Linear(n_units, n_out),
        )
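The matching forward pass in that example chains the three layers with ReLU activations; a sketch of the method that sits alongside the constructor above (F is chainer.functions):

import chainer.functions as F

def __call__(self, x):
    h1 = F.relu(self.l1(x))
    h2 = F.relu(self.l2(h1))
    return self.l3(h2)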

github minerllabs / baselines / general / chainerrl / baselines / q_functions.py View on Github
def __init__(self, n_input_channels=4, n_output_channels=512, activation=F.relu, bias=0.1):
        self.n_input_channels = n_input_channels
        self.activation = activation
        self.n_output_channels = n_output_channels

        layers = [
            L.Convolution2D(n_input_channels, 32, 8, stride=4, initial_bias=bias),
            L.Convolution2D(32, 64, 4, stride=2, initial_bias=bias),
            L.Convolution2D(64, 64, 3, stride=1, initial_bias=bias),
            # The resulting feature map is 4x4x64 = 1024 when the input to the first conv is 64x64.
            # NOTE: we use `None` as `in_size` to defer parameter initialization for usability,
            # but the size should be checked against the concrete input size (here, 1024).
            # L.Linear(3136, n_output_channels, initial_bias=bias),
            L.Linear(None, n_output_channels, initial_bias=bias),
        ]

        super(NatureDQNHead, self).__init__(*layers)
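Since NatureDQNHead is a ChainList that applies its layers in order, one way to confirm the deferred in_size (an illustrative check, not part of the source) is to push a dummy batch through and inspect the materialized weight:

import numpy as np

head = NatureDQNHead()  # defaults: 4 input channels, 512 output channels
dummy = np.zeros((1, 4, 64, 64), dtype=np.float32)
head(dummy)             # the first call materializes the Linear weight
# 64x64 -> conv(8, s4) -> 15x15 -> conv(4, s2) -> 6x6 -> conv(3, s1) -> 4x4, so 4*4*64 = 1024.
assert head[3].W.shape == (512, 1024)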

github chainer / chainerrl / chainerrl / q_functions / state_action_q_functions.py View on Github
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
                 n_hidden_layers, nonlinearity=F.relu, last_wscale=1.):
        self.n_input_channels = n_dim_obs + n_dim_action
        self.n_hidden_layers = n_hidden_layers
        self.n_hidden_channels = n_hidden_channels
        self.nonlinearity = nonlinearity
        super().__init__()
        with self.init_scope():
            self.fc = MLP(self.n_input_channels, n_hidden_channels,
                          [self.n_hidden_channels] * self.n_hidden_layers,
                          nonlinearity=nonlinearity,
                          )
            self.lstm = L.LSTM(n_hidden_channels, n_hidden_channels)
            self.out = L.Linear(n_hidden_channels, 1,
                                initialW=LeCunNormal(last_wscale))
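For context, this Q-function's forward pass concatenates the observation and action along the feature axis (hence n_input_channels = n_dim_obs + n_dim_action) before the MLP, LSTM, and output layer; an illustrative sketch, not the verbatim source:

import chainer.functions as F

def q_value(q_func, obs, action):
    # obs: (batch, n_dim_obs), action: (batch, n_dim_action)
    h = F.concat((obs, action), axis=1)
    h = q_func.nonlinearity(q_func.fc(h))
    h = q_func.lstm(h)    # the stateful LSTM carries the hidden state across calls
    return q_func.out(h)  # one Q-value per sample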

github sharpstill / AU_R-CNN / lstm_end_to_end / model / roi_space_time_net / space_time_rnn.py View on Github
def __init__(self, n_layers, insize, outsize, initialW=None, use_bi_lstm=False):
        super(TemporalRNN, self).__init__()
        if not initialW:
            initialW = initializers.HeNormal()
        self.n_layer = n_layers
        self.insize = insize
        with self.init_scope():
            if use_bi_lstm:
                self.lstm = L.NStepBiLSTM(self.n_layer, 1024, 256, dropout=0.1)
            else:
                self.lstm = L.NStepLSTM(self.n_layer, 1024, 512, dropout=0.1)
            self.fc1 = L.Linear(insize, 1024, initialW=initialW)
            self.fc2 = L.Linear(1024, 1024, initialW=initialW)
            self.fc3 = L.Linear(512, outsize, initialW=initialW)

github odashi / chainer_examples / chainer-1.5 / mt_s2s_attention.py View on Github
def __init__(self, hidden_size):
    super(Attention, self).__init__(
        aw = links.Linear(hidden_size, hidden_size),
        bw = links.Linear(hidden_size, hidden_size),
        pw = links.Linear(hidden_size, hidden_size),
        we = links.Linear(hidden_size, 1),
        ts = links.Linear(hidden_size, hidden_size),
        sp = links.Linear(hidden_size, 1),
    )
    self.hidden_size = hidden_size
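This snippet uses the Chainer 1.x style of passing child links to the Chain constructor as keyword arguments; under Chainer 2 and later the same links would typically be registered inside init_scope(), along these lines:

import chainer
import chainer.links as L

class Attention(chainer.Chain):

    def __init__(self, hidden_size):
        super(Attention, self).__init__()
        with self.init_scope():
            self.aw = L.Linear(hidden_size, hidden_size)
            self.bw = L.Linear(hidden_size, hidden_size)
            self.pw = L.Linear(hidden_size, hidden_size)
            self.we = L.Linear(hidden_size, 1)
            self.ts = L.Linear(hidden_size, hidden_size)
            self.sp = L.Linear(hidden_size, 1)
        self.hidden_size = hidden_size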

github fukuta0614 / chainer-image-generation / VAE-GAN / vaegan.py View on Github
def __init__(self, density=1, size=64, latent_size=128, channel=3):
        assert (size % 16 == 0)
        initial_size = size // 16  # integer division; size is asserted to be a multiple of 16
        super(Generator, self).__init__(
            g1=L.Linear(latent_size, initial_size * initial_size * 256 * density, wscale=0.02 * math.sqrt(latent_size)),
            norm1=L.BatchNormalization(initial_size * initial_size * 256 * density),
            g2=L.Deconvolution2D(256 * density, 128 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 256 * density)),
            norm2=L.BatchNormalization(128 * density),
            g3=L.Deconvolution2D(128 * density, 64 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 128 * density)),
            norm3=L.BatchNormalization(64 * density),
            g4=L.Deconvolution2D(64 * density, 32 * density, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 64 * density)),
            norm4=L.BatchNormalization(32 * density),
            g5=L.Deconvolution2D(32 * density, channel, 4, stride=2, pad=1,
                                 wscale=0.02 * math.sqrt(4 * 4 * 32 * density)),
        )
        self.density = density
        self.latent_size = latent_size
        self.initial_size = initial_size
github DwangoMediaVillage / chainer_spiral / chainer_spiral / models / spiral.py View on Github external
def __init__(self, imsize, conditional):
        self.imsize = imsize
        self.conditional = conditional
        super().__init__()
        with self.init_scope():
            in_channel = 2 if self.conditional else 1
            self.l1 = L.Convolution2D(in_channel, 3, stride=1, ksize=2)
            self.l2 = L.Linear(12, 5)
            self.l3 = L.Linear(5, 1)

github quolc / neural-collage / gen_models / resnet_256_auxab.py View on Github
        self.initial_fast_alpha = initial_fast_alpha
        self.limit_fast_alpha = limit_fast_alpha
        self.step_fast_alpha = step_fast_alpha
        self.fast_loss = None

        with self.init_scope():
            # parameters to be slow-updated
            self.lA1 = L.Linear(dim_z, dim_a, initialW=initializer)
            self.lA2 = L.Linear(dim_a, dim_zeta, initialW=initializer)
            self.lB1 = L.Linear(dim_zeta, dim_b, initialW=initializer)
            self.lB2 = L.Linear(dim_b, dim_z, initialW=initializer)
            self.preluW = L.Parameter(np.ones((dim_z,), dtype=np.float32) * 0.25)
            self.preluMiddleW = L.Parameter(np.ones((dim_zeta,), dtype=np.float32) * 0.25)

            # inherited from ResNetGenerator
            self.l1 = L.Linear(dim_z, (bottom_width ** 2) * ch * 16, initialW=initializer)
            self.block2 = Block(ch * 16, ch * 16, activation=activation, upsample=True, n_classes=n_classes)
            self.block3 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
            self.block4 = Block(ch * 8, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
            self.block5 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
            self.block6 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
            self.block7 = Block(ch * 2, ch, activation=activation, upsample=True, n_classes=n_classes)
            self.b8 = L.BatchNormalization(ch)
            self.l8 = L.Convolution2D(ch, 3, ksize=3, stride=1, pad=1, initialW=initializer)

            if self.learned_lr:
                self._fast_alpha = chainer.links.Parameter(self.xp.ones((dim_zeta,), dtype=self.xp.float32) * initial_fast_alpha)
            else:
                self._fast_alpha = initial_fast_alpha