How to use sru - 10 common examples

To help you get started, we’ve selected ten sru examples based on popular ways the library is used in public projects.

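Most of the examples below use the PyTorch implementation from asappresearch/sru. As a minimal sketch of that API (the shapes in the comments assume the settings shown):

import torch
from sru import SRU

x = torch.randn(20, 4, 128)    # (length, batch, input_size)
rnn = SRU(input_size=128, hidden_size=64, num_layers=2)
output, state = rnn(x)         # output: (20, 4, 64); state: (2, 4, 64)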

Example 1: asappresearch / sru / misc / test_sru.py
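
This test embeds variable-length sequences, packs them with pack_padded_sequence, and feeds the PackedSequence directly to an SRU layer. It then modifies a value in the padding region and checks that the output sum is unchanged, i.e. that padded positions are properly masked. The sizes defined at the top of the listing are stand-ins for values the original test sets elsewhere.
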
import numpy as np
import torch
import torch.nn as nn
from sru import SRU

# hypothetical sizes for this excerpt; the original test defines N, max_len,
# V, K and K_out earlier, and the assertions below depend on the lengths the
# seeded RNG produces for them
N, max_len, V, K, K_out = 5, 10, 32, 8, 11

print('N', N, 'max_len', max_len, 'K', K, 'K_out', K_out)

torch.manual_seed(123)
np.random.seed(123)
lengths = torch.from_numpy(np.random.choice(max_len, N)) + 1
tensors = [torch.from_numpy(np.random.choice(V, l, replace=True)) for l in lengths.tolist()]
embedder = nn.Embedding(V, K)
tensors = nn.utils.rnn.pad_sequence(tensors)
print('tensors.size()', tensors.size())
embedded = embedder(tensors)
print('embedded.size()', embedded.size())
packed = nn.utils.rnn.pack_padded_sequence(embedded, lengths, batch_first=False, enforce_sorted=False)
print(isinstance(packed, nn.utils.rnn.PackedSequence))

sru = SRU(K, K_out)
out1, state = sru(packed)
out1, lengths1 = nn.utils.rnn.pad_packed_sequence(out1)
print('out1.size()', out1.size())
assert (lengths != lengths1).sum().item() == 0
print('out1.sum()', out1.sum().item())

# change one of the indices that should be masked out: position (6, 1) lies
# in the padding region, so the packed output must not change
tensors[6, 1] = 3
embedded = embedder(tensors)
packed = nn.utils.rnn.pack_padded_sequence(embedded, lengths, batch_first=False, enforce_sorted=False)
out2, state = sru(packed)
out2, lengths2 = nn.utils.rnn.pad_packed_sequence(out2)
assert (lengths != lengths2).sum().item() == 0
print('out2.sum()', out2.sum().item())
assert out2.sum().item() == out1.sum().item()

Example 2: asappresearch / sru / misc / test_sru.py
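
This test builds a multi-layer bidirectional SRU with nn_rnn_compatible_return enabled and compares the size of its returned state against that of an equivalent nn.GRU. The sequence sizes are again stand-ins.
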
import numpy as np
import torch
import torch.nn as nn
from sru import SRU

# hypothetical sizes for this excerpt; the original test defines N, max_len
# and V earlier
N, max_len, V = 5, 10, 32
K = 8
K_out = 11
num_layers = 3
bidirectional = True

print('N', N, 'max_len', max_len, 'num_layers', num_layers, 'bidirectional', bidirectional, 'K', K, 'K_out', K_out)

torch.manual_seed(123)
np.random.seed(123)
lengths = torch.from_numpy(np.random.choice(max_len, N)) + 1
tensors = [torch.from_numpy(np.random.choice(V, l, replace=True)) for l in lengths.tolist()]
embedder = nn.Embedding(V, K)
tensors = nn.utils.rnn.pad_sequence(tensors)
embedded = embedder(tensors)

# nn_rnn_compatible_return=True makes SRU return its state in the
# (num_layers * num_directions, batch, hidden) layout that nn.GRU uses
sru = SRU(K, K_out, nn_rnn_compatible_return=True, bidirectional=bidirectional, num_layers=num_layers)
out, state = sru(embedded)
print('out.size()', out.size())
print('state.size()', state.size())

gru = nn.GRU(K, K_out, bidirectional=bidirectional, num_layers=num_layers)
gru_out, gru_state = gru(embedded)
print('gru_state.size()', gru_state.size())

Example 3: asappresearch / sru / misc / test_cpu_impl.py
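
This helper profiles ten forward passes of a bidirectional SRUCell under cProfile and prints the cumulative-time statistics; the original file continues with a second profiling pass that is truncated here.
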
import cProfile
import io
import pstats

import torch
from sru import SRUCell


def profile_speed():
    bcell = SRUCell(400, 200, bidirectional=True)
    bcell.eval()
    mask = torch.zeros(200, 1)     # (length, batch) padding mask, all valid
    x = torch.randn(200, 1, 400)   # (length, batch, input_size)
    pr = cProfile.Profile()
    pr.enable()
    with torch.no_grad():
        for i in range(10):
            r = bcell(x, mask_pad=mask)
    pr.disable()
    s = io.StringIO()
    sortby = 'cumulative'
    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    ps.print_stats()
    print(s.getvalue())

Example 4: asappresearch / sru / misc / test_cpu_impl.py
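
This test runs a bidirectional SRUCell twice on the same input, with a padding mask marking the first and last time steps, once under torch.no_grad(), and prints both outputs for comparison.
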
import torch
from sru import SRUCell


def test_bi_fwd():
    cell = SRUCell(5, 5, bidirectional=True)
    x = torch.randn(7, 1, 5)
    mask = torch.zeros(7, 1)   # (length, batch) padding mask
    mask[0, 0] = 1             # mark the first and last steps as padding
    mask[6, 0] = 1
    with torch.no_grad():
        out_1 = cell(x, mask_pad=mask)
    out_2 = cell(x, mask_pad=mask)
    print(out_1)
    print()
    print(out_2)

Example 5: asappresearch / sru / misc / test_cpu_impl.py
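
The same check for a unidirectional SRUCell with a tanh activation.
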
import torch
from sru import SRUCell


def test_fwd():
    cell = SRUCell(3, 5, use_tanh=True)
    mask = torch.zeros(7, 1)   # (length, batch) padding mask
    mask[0, 0] = 1             # mark the first and last steps as padding
    mask[6, 0] = 1
    x = torch.randn(7, 1, 3)
    with torch.no_grad():
        out_1 = cell(x, mask_pad=mask)
    out_2 = cell(x, mask_pad=mask)
    print(out_1)
    print()
    print(out_2)

Example 6: titu1994 / keras-SRU / imdb_sru.py
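
This script trains stacked Keras SRU layers on the IMDB sentiment-classification task. The import of SRU assumes the repository's sru.py module is on the path, and the hyperparameter and data-loading lines at the top of the listing are reconstructed assumptions; the original script defines them near the top of the file.
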
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras.layers import Input, Embedding, Dense
from keras.models import Model

from sru import SRU  # the Keras SRU layer defined in this repository

# hypothetical hyperparameters; the original script sets these near the top
max_features = 20000   # vocabulary size
maxlen = 80            # truncate/pad reviews to this many tokens
batch_size = 32
depth = 1              # number of stacked SRU layers

# load the IMDB data, padded to fixed length
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

print('Build model...')
ip = Input(shape=(maxlen,))
embed = Embedding(max_features, 128)(ip)

prev_input = embed
hidden_states = []

# stack depth - 1 sequence-returning SRU layers, collecting their final cell states
if depth > 1:
    for i in range(depth - 1):
        h, h_final, c_final = SRU(128, dropout=0.0, recurrent_dropout=0.0,
                                  return_sequences=True, return_state=True,
                                  unroll=True)(prev_input)
        prev_input = h
        hidden_states.append(c_final)

# the last SRU layer returns only its final output, fed to a sigmoid classifier
outputs = SRU(128, dropout=0.0, recurrent_dropout=0.0, unroll=True)(prev_input)
outputs = Dense(1, activation='sigmoid')(outputs)

model = Model(ip, outputs)
model.summary()

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=100,
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)

Example 7: musyoku / chainer-sru / benchmark / taolei87.py
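
This function benchmarks repeated forward passes of the Chainer SRU link on GPU data. The import lines are assumptions about the repository layout, and the snippet is truncated before the backward timing finishes.
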
import time

import numpy as np
import chainer
from chainer import cuda

from sru import SRU  # the Chainer SRU link defined in this repository


def benchmark_chainer_sru(batchsize, seq_length, feature_dimension, repeat=50):
    layer = SRU(feature_dimension)
    # this implementation expects (batch, features, length) input
    x_data = np.random.normal(0, 1, size=(batchsize, feature_dimension, seq_length)).astype(np.float32)
    x_data = cuda.to_gpu(x_data)
    layer.to_gpu()

    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        # forward
        start_time = time.time()
        for i in range(repeat):
            output, cell, last_cell = layer(x_data, None)
        forward_time_mean = (time.time() - start_time) / repeat

    with chainer.using_config("train", True):
        # backward
        start_time = time.time()
        for i in range(repeat):
            output, cell, last_cell = layer(x_data, None)

Example 8: deepgram / kur / kur / containers / layers / recurrent.py
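
This excerpt from kur's recurrent-layer container shows the PyTorch backend mapping a declared layer type ('lstm', 'gru' or 'sru') to the module class that implements it, importing SRU lazily only when it is actually requested.
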
                # ... (earlier Keras-backend code elided from this excerpt) ...
                yield merge
            else:
                kwargs['name'] = self.name
                yield func(**kwargs)

        elif backend.get_name() == 'pytorch':

            # pylint: disable=import-error
            import torch.nn as nn
            from kur.backend.pytorch.modules import swap_batch_dimension
            if self.type == 'sru':
                from sru import SRU
                _SRU = SRU
            else:
                _SRU = None
            # pylint: enable=import-error

            # map the declared layer type onto the backing PyTorch module class
            func = {
                'lstm' : nn.LSTM,
                'gru' : nn.GRU,
                'sru' : _SRU
            }.get(self.type)
            if func is None:
                raise ValueError('Unhandled RNN type: {}. This is a bug.'
                                 .format(self.type))

            if self.bidirectional and self.merge != 'concat':
                raise ValueError('PyTorch backend currently only supports '
                                 '"concat" mode for bidirectional RNNs.')

Example 9: BCV-Uniandes / DMS / dmn_pytorch / models / dmn.py
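
In this model's constructor, an SRU serves as the multimodal recurrent network self.mrnn, with nn.LSTM swapped in when the lstm flag is set.
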
        # excerpted from the model's __init__: the lstm flag swaps nn.LSTM in
        # for the default SRU modules
        if lstm:
            self.lang_model = nn.LSTM(
                emb_size, hid_size, num_layers=lang_layers)

        self.mix_we = mix_we
        lineal_in = hid_size + emb_size * int(mix_we)
        self.adaptative_filter = nn.Linear(
            in_features=lineal_in, out_features=(num_filters * (vis_size + 8)))

        self.comb_conv = nn.Conv2d(in_channels=(8 + emb_size + hid_size +
                                                vis_size + num_filters),
                                   out_channels=mixed_size,
                                   kernel_size=1,
                                   padding=0)

        # multimodal recurrent network: SRU by default, nn.LSTM when requested
        self.mrnn = SRU(mixed_size, hid_mixed_size,
                        num_layers=mixed_layers)
        if lstm:
            self.mrnn = nn.LSTM(mixed_size, hid_mixed_size,
                                num_layers=mixed_layers)

        if not self.high_res:
            self.output_collapse = nn.Conv2d(in_channels=hid_mixed_size,
                                             out_channels=1,
                                             kernel_size=1)

Example 10: DLHacks / SRU / sru.py
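
Note that this project appears to implement a different model that shares the acronym: the Statistical Recurrent Unit, built as a PyTorch nn.Module. The import and class wrapper in the listing are added for context, and the comments are translated from the original Japanese.
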
import torch.nn as nn


class SRU(nn.Module):
    def __init__(self, input_size, phi_size, r_size, cell_out_size, output_size, A=[0, 0.5, 0.9, 0.99, 0.999], dropout=0, gpu=True):
        """
        input_size:    number of input features
        phi_size:      number of units of phi; equal to the dimension of \mu^{\alpha}
        r_size:        number of units of r
        cell_out_size: number of units output by the SRUCell
        output_size:   output dimension
        A:             [\alpha_1, \alpha_2, ..., \alpha_m]
        """

        super(SRU, self).__init__()

        self._gpu = gpu
        self.n_alpha  = len(A)
        self.phi_size = phi_size
        self.mu_size  = self.phi_size * self.n_alpha  # number of mu units = phi units * number of alphas

        # define the linear connections
        self.mu2r   = nn.Linear(self.mu_size, r_size)
        self.xr2phi = nn.Linear(input_size + r_size, phi_size)
        self.mu2o   = nn.Linear(self.mu_size, cell_out_size)
        self.drop   = nn.Dropout(p=dropout)
        self.linear = nn.Linear(cell_out_size, output_size)

        # set up muphi2phi
        # A_mask: Kronecker product of (A, ones(1, phi_size)), shape => (1, mu_dim)
        if self._gpu: