How to use the chainer.links module in chainer

To help you get started, we’ve selected a few chainer.links examples, based on popular ways the module is used in public projects.

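Before diving into the project examples, here is a minimal, self-contained sketch of the basic chainer.links pattern: links are registered inside init_scope() so that their parameters are tracked by the chain. The MLP name and layer sizes are ours, not from any of the projects below.

import chainer
import chainer.functions as F
import chainer.links as L

class MLP(chainer.Chain):
    def __init__(self, n_hidden, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            # in_size=None lets Chainer infer the input size at the first call
            self.l1 = L.Linear(None, n_hidden)
            self.l2 = L.Linear(n_hidden, n_out)

    def __call__(self, x):
        h = F.relu(self.l1(x))
        return self.l2(h)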

github DwangoMediaVillage / Comicolorization / comicolorization / models / discriminator.py
def __init__(self, size, first_pooling_size=1):
    # assumes: import math; import chainer
    # spatial size left after four stride-2 convolutions and the optional input pooling
    last_size = size // (2 ** 4) // first_pooling_size

    # Chainer v1-style registration: links are passed as keyword arguments
    # to Chain.__init__ (newer code registers them inside init_scope())
    super(Discriminator, self).__init__(
        c0=chainer.links.Convolution2D(3, 64, 4, stride=2, pad=1),
        c1=chainer.links.Convolution2D(64, 128, 4, stride=2, pad=1),
        c2=chainer.links.Convolution2D(128, 256, 4, stride=2, pad=1),
        c3=chainer.links.Convolution2D(256, 512, 4, stride=2, pad=1),
        bn0=chainer.links.BatchNormalization(64),
        bn1=chainer.links.BatchNormalization(128),
        bn2=chainer.links.BatchNormalization(256),
        bn3=chainer.links.BatchNormalization(512),
        # a parameterized layer is a link, not a function: chainer.links.Linear
        # (the wscale argument exists only in Chainer v1)
        l0z=chainer.links.Linear(last_size ** 2 * 512, 1,
                                 wscale=0.02 * math.sqrt(last_size ** 2 * 512)),
    )

    if first_pooling_size > 1:
        # instantiating the function object directly is a v1 idiom; newer
        # code would call chainer.functions.average_pooling_2d instead
        self.first_pooling = chainer.functions.AveragePooling2D(
            first_pooling_size, stride=first_pooling_size)
    else:
        self.first_pooling = lambda x: x  # identity pass-through
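For reference, the same layers written against the Chainer v2+ API would register links inside init_scope(). The sketch below shows only a few of the layers, and the ModernDiscriminator name is ours. Note that wscale was removed in v2; since v1 scaled weights by wscale * sqrt(1 / fan_in), the wscale=0.02*sqrt(fan_in) trick above amounts to a plain N(0, 0.02) initialization:

import chainer
import chainer.links as L
from chainer import initializers

class ModernDiscriminator(chainer.Chain):
    def __init__(self, size, first_pooling_size=1):
        super(ModernDiscriminator, self).__init__()
        last_size = size // (2 ** 4) // first_pooling_size
        with self.init_scope():
            self.c0 = L.Convolution2D(3, 64, 4, stride=2, pad=1)
            self.bn0 = L.BatchNormalization(64)
            # initialW replaces the removed wscale argument
            self.l0z = L.Linear(last_size ** 2 * 512, 1,
                                initialW=initializers.Normal(0.02))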
github vzhong / chainer2pytorch / tests / test_nn.py
def test_forward(self):
    # assumes: numpy as np, chainer.links as L, torch,
    # torch.autograd.Variable, and chainer2pytorch's nn are imported
    x = np.random.uniform(0, 1, [self.batch_size, self.d_in]).astype(np.float32)
    # build a Chainer linear layer with a random initial bias
    # (bias= is the Chainer v1 keyword; later versions use initial_bias=)
    c = L.Linear(self.d_in, self.d_hid, bias=np.random.uniform(0, 1, [self.d_hid]))
    # convert it to an equivalent torch.nn.Linear
    t = nn.Linear.from_chainer(c)

    tx = Variable(torch.from_numpy(x))

    # the two layers should produce numerically identical outputs
    co = c(x)
    to = t(tx)
    self.assertTrue(np.allclose(co.data, to.data.numpy()),
                    'co:\n{}\nto:\n{}'.format(co.data, to.data.numpy()))
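The conversion itself is straightforward because both libraries store the linear weight as an (out_features, in_features) matrix. A minimal manual version (an illustration of the idea, not the chainer2pytorch code) might look like:

import numpy as np
import torch

def linear_from_chainer(c_linear):
    """Copy an initialized chainer.links.Linear into a torch.nn.Linear."""
    out_size, in_size = c_linear.W.shape  # both libraries use (out, in)
    t_linear = torch.nn.Linear(in_size, out_size)
    t_linear.weight.data = torch.from_numpy(np.array(c_linear.W.data))
    t_linear.bias.data = torch.from_numpy(np.array(c_linear.b.data))
    return t_linear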
github lazykyama / chainer_profutil / tests / test_profiled_optimizer.py
def __init__(self):
    super(InvalidNetwork, self).__init__()
    with self.init_scope():
        # in_size=None defers weight allocation until the first forward pass
        self.l1 = L.Linear(None, 100)
        self.l2 = L.Linear(None, 10)
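With in_size=None, the parameters of an L.Linear do not exist until data flows through the link, which is easy to confirm (a small sketch; the 784-dimensional input is chosen arbitrarily):

import numpy as np
import chainer.links as L

l1 = L.Linear(None, 100)
print(l1.W.data)   # None: the weight is not allocated yet
y = l1(np.zeros((1, 784), dtype=np.float32))
print(l1.W.shape)  # (100, 784), inferred from the first input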
github nknytk / ml-study / image-classification / inception_resnet_v2.py
def __init__(self, *args, **kwargs):
    # recover the output channel count whether it was passed positionally
    # (in_channels, out_channels, ...) or as a keyword argument
    oc = kwargs['out_channels'] if 'out_channels' in kwargs else args[1]
    super().__init__(
        conv=L.Convolution2D(*args, **kwargs),
        bn=L.BatchNormalization(oc)
    )
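A chain like this is usually invoked as convolution, then batch normalization, then an activation. A minimal sketch of that pattern (the ConvBN name and the ReLU choice are our assumptions, not quoted from the repo):

import chainer
import chainer.functions as F
import chainer.links as L

class ConvBN(chainer.Chain):
    def __init__(self, in_ch, out_ch, ksize):
        super(ConvBN, self).__init__(
            conv=L.Convolution2D(in_ch, out_ch, ksize),
            bn=L.BatchNormalization(out_ch)
        )

    def __call__(self, x):
        # convolution -> batch normalization -> ReLU
        return F.relu(self.bn(self.conv(x)))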
github dhgrs / chainer-ClariNet / AutoregressiveWaveNet / WaveNet / modules.py
def __init__(self, n_loop, n_layer, filter_size,
             residual_channels, dilated_channels, skip_channels,

             # arguments for output
             output_dim, quantize, log_scale_min,

             # arguments for conditioning
             condition_dim,

             # arguments for dropout
             dropout_zero_rate):
    super(WaveNet, self).__init__()
    with self.init_scope():
        # a 2x1 convolution over the (time, 1) axes embeds the raw waveform
        self.embed = L.Convolution2D(
            1, residual_channels, (2, 1), pad=(1, 0))

        self.resnet = ResidualNet(
            n_loop, n_layer, filter_size,
            residual_channels, dilated_channels, skip_channels,
            condition_dim, dropout_zero_rate)

        # two 1x1 convolutions project the summed skip connections
        # down to the output distribution parameters
        self.proj1 = L.Convolution2D(skip_channels, skip_channels, 1)
        self.proj2 = L.Convolution2D(skip_channels, output_dim, 1)
        self.output_dim = output_dim

    self.input_dim = 1
    self.quantize = quantize
    self.skip_channels = skip_channels
    self.log_scale_min = log_scale_min
github akitotakeki / pgp-chainer / models / densenet_dconv.py
def __init__(self, in_ch, out_ch, pool=True):
    super(TransitionLayer, self).__init__()
    self.pool = pool
    with self.init_scope():
        self.bn = L.BatchNormalization(in_ch)
        # 1x1 convolution changes the channel count between dense blocks
        self.conv = L.Convolution2D(in_ch, out_ch, 1, 1, 0)
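In a DenseNet, a transition layer is typically applied as batch norm, ReLU, 1x1 convolution, then 2x2 average pooling when downsampling. A hypothetical __call__ body for TransitionLayer, reconstructed from that convention rather than quoted from the repo:

import chainer.functions as F

def __call__(self, x):  # hypothetical TransitionLayer.__call__
    h = self.conv(F.relu(self.bn(x)))
    if self.pool:
        h = F.average_pooling_2d(h, 2, stride=2)  # halve the spatial resolution
    return h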
github odashi / chainer_examples / chainer-1.5 / attention_lm.py
def __init__(self, embed_size, hidden_size):
    super(LSTMEncoder, self).__init__(
        # links.LSTM is stateful: it keeps its hidden and cell state
        # between calls until reset_state() is invoked
        lstm=links.LSTM(embed_size, hidden_size),
    )
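Because links.LSTM carries state across calls, a typical pattern is to reset it before each new sequence (the layer sizes below are arbitrary):

import numpy as np
import chainer.links as L

lstm = L.LSTM(32, 64)
lstm.reset_state()  # clear hidden and cell state before a new sequence
for _ in range(10):
    x = np.zeros((1, 32), dtype=np.float32)
    h = lstm(x)  # h carries context from all previous time steps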
github Hakuyume / chainer-fpn / fpn / fpn.py
def __init__(self, base, n_base_output, scales):
    super().__init__()
    with self.init_scope():
        self.base = base
        self.inner = chainer.ChainList()
        self.outer = chainer.ChainList()

    init = {'initialW': initializers.GlorotNormal()}
    for _ in range(n_base_output):
        # Convolution2D(out_channels, ksize): in_channels is omitted and
        # inferred from the input at the first forward pass
        self.inner.append(L.Convolution2D(256, 1, **init))  # 1x1 lateral conv
        self.outer.append(L.Convolution2D(256, 3, pad=1, **init))  # 3x3 output conv

    self.scales = scales
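The two-argument Convolution2D calls above are shorthand: when only (out_channels, ksize) are given, in_channels defaults to None and is inferred from the first input. These two constructions are equivalent:

import chainer.links as L

# explicit form: in_channels=None is inferred at the first forward pass
conv_a = L.Convolution2D(None, 256, 1)
# shorthand form: positional args are read as (out_channels, ksize)
conv_b = L.Convolution2D(256, 1)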
github hvy / chainer-gan-improvements / models.py
def __init__(self, in_shape):
    super(Discriminator, self).__init__(
        c0=L.Convolution2D(1, 32, 4, stride=2, pad=1),
        c1=L.Convolution2D(32, 64, 4, stride=2, pad=1),
        c2=L.Convolution2D(64, 128, 4, stride=2, pad=1),
        c3=L.Convolution2D(128, 256, 4, stride=2, pad=1),
        # lindim is a repo helper that appears to compute the flattened
        # feature size after downsampling in_shape by 2**4 with 256 channels
        fc4=L.Linear(lindim(in_shape, 2**4, 256), 512),
        mbd=MinibatchDiscrimination(512, 32, 8),
        fc5=L.Linear(512, 512+32),  # alternative to minibatch discrimination
        fc6=L.Linear(512+32, 2),
        bn1=L.BatchNormalization(64),
        bn2=L.BatchNormalization(128),
        bn3=L.BatchNormalization(256)
    )
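lindim and MinibatchDiscrimination are helpers defined elsewhere in this repo. A plausible reconstruction of lindim (hypothetical, not the repo's actual code) would compute the flattened feature size after downsampling:

def lindim(in_shape, scale, channels):
    # hypothetical: flattened size of a (channels, H/scale, W/scale) feature
    # map for an input with spatial shape in_shape = (H, W)
    h, w = in_shape
    return channels * (h // scale) * (w // scale)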
github chainer / chainerrl / chainerrl / links / noisy_linear.py
def __init__(self, mu_link, sigma_scale=0.4):
    super(FactorizedNoisyLinear, self).__init__()
    self.out_size = mu_link.out_size
    # the wrapped link has a bias iff a parameter named '/b' is registered
    self.nobias = not ('/b' in [name for name, _ in mu_link.namedparams()])

    W_data = mu_link.W.array
    # if the wrapped link was built with in_size=None, stay uninitialized too
    in_size = None if W_data is None else W_data.shape[1]
    device_id = mu_link._device_id

    with self.init_scope():
        # mu: the deterministic part of the noisy layer
        self.mu = L.Linear(in_size, self.out_size, self.nobias,
                           initialW=LeCunUniform(1 / numpy.sqrt(3)))

        # sigma: learned per-parameter noise scales, constant-initialized
        self.sigma = L.Linear(in_size, self.out_size, self.nobias,
                              initialW=VarianceScalingConstant(sigma_scale),
                              initial_bias=VarianceScalingConstant(sigma_scale))

    if device_id is not None:
        self.to_gpu(device_id)
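The '/b' check above relies on Link.namedparams(), which yields (path, parameter) pairs for every registered parameter; a quick sketch of what it reports:

import chainer.links as L

with_bias = L.Linear(3, 5)
no_bias = L.Linear(3, 5, nobias=True)
print(sorted(name for name, _ in with_bias.namedparams()))  # ['/W', '/b']
print(sorted(name for name, _ in no_bias.namedparams()))    # ['/W']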