How to use the nnmnkwii.paramgen.mlpg function in nnmnkwii

To help you get started, we've selected a few examples showing how nnmnkwii.paramgen.mlpg is used in public projects.

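Before the project examples, here is a minimal, self-contained sketch of calling G.mlpg directly. The windows follow nnmnkwii's (left context, right context, window coefficients) convention; the static/delta/delta-delta set below is the one used throughout nnmnkwii's tests, while T and static_dim are arbitrary illustration values.

import numpy as np
from nnmnkwii import paramgen as G

# Static + delta + delta-delta windows in nnmnkwii's
# (left context, right context, coefficients) format.
windows = [
    (0, 0, np.array([1.0])),
    (1, 1, np.array([-0.5, 0.0, 0.5])),
    (1, 1, np.array([1.0, -2.0, 1.0])),
]

T, static_dim = 10, 2  # arbitrary sizes for illustration
means = np.random.rand(T, static_dim * len(windows))
variances = np.tile(np.random.rand(static_dim * len(windows)), (T, 1))

# MLPG recovers the most likely static trajectory, shape (T, static_dim).
static_trajectory = G.mlpg(means, variances, windows)
assert static_trajectory.shape == (T, static_dim)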

From r9y9/nnmnkwii on GitHub: tests/test_paramgen.py
def test_mlpg():
    from nnmnkwii import paramgen as G

    static_dim = 2
    T = 10

    windows_set = _get_windows_set()
    for windows in windows_set:
        means = np.random.rand(T, static_dim * len(windows))
        variances = np.tile(np.random.rand(static_dim * len(windows)), (T, 1))

        generated = G.mlpg(means, variances, windows)
        assert generated.shape == (T, static_dim)

    # Test variances correctly expanded
    for windows in windows_set:
        for dtype in [np.float32, np.float64]:
            means = np.random.rand(T, static_dim * len(windows)).astype(dtype)
            variances = np.random.rand(static_dim * len(windows)).astype(dtype)
            variances_frames = np.tile(variances, (T, 1))

            # Explicitly give variances over frame
            generated1 = G.mlpg(means, variances_frames, windows)
            # Give global variances. This will get expanded over frames
            # internally
            generated2 = G.mlpg(means, variances, windows)

            assert generated1.dtype == dtype
            assert np.allclose(generated1, generated2)
From r9y9/nnmnkwii on GitHub: tests/test_autograd.py
# (Excerpt: static_dim and T are defined earlier in the test.)
for windows in _get_windows_set():
    batch_size = 5
    torch.manual_seed(1234)

    # Prepare inputs
    means = torch.rand(T, static_dim * len(windows))
    means_expanded = means.expand(
        batch_size, means.shape[0], means.shape[1])
    reshaped_means = torch.from_numpy(
        G.reshape_means(means.numpy(), static_dim))
    reshaped_means_expanded = reshaped_means.expand(
        batch_size, reshaped_means.shape[0], reshaped_means.shape[1])

    # Target
    y = G.mlpg(means.numpy(), np.ones(static_dim * len(windows)), windows)
    y = torch.from_numpy(y)
    y_expanded = y.expand(batch_size, y.size(0), y.size(1))

    means.requires_grad = True
    means_expanded.requires_grad = True
    reshaped_means.requires_grad = True
    reshaped_means_expanded.requires_grad = True

    # Case 1: 2d with reshaped means
    R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
    y_hat1 = AF.unit_variance_mlpg(R, reshaped_means)

    # Case 2: 3d with reshaped means
    y_hat2 = AF.unit_variance_mlpg(R, reshaped_means_expanded)
    for i in range(batch_size):
        assert np.allclose(y_hat1.data.numpy(), y_hat2[i].data.numpy())
From r9y9/nnmnkwii on GitHub: tests/test_paramgen.py
def test_unit_variance_mlpg():
    from nnmnkwii import paramgen as G
    static_dim = 2
    T = 10

    for windows in _get_windows_set():
        means = np.random.rand(T, static_dim * len(windows))
        variances = np.ones(static_dim * len(windows))
        y = G.mlpg(means, variances, windows)

        R = G.unit_variance_mlpg_matrix(windows, T)
        y_hat = R.dot(G.reshape_means(means, static_dim))
        assert np.allclose(y_hat, y)
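The trick being tested: with all variances fixed to one, MLPG reduces to a single linear map, so unit_variance_mlpg_matrix can precompute that map as a matrix R, and parameter generation becomes an ordinary matrix product against the reshaped means.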
From r9y9/nnmnkwii on GitHub: tests/test_autograd.py
def test_functional_mlpg():
    static_dim = 2
    T = 5

    for windows in _get_windows_set():
        torch.manual_seed(1234)
        means = torch.rand(T, static_dim * len(windows))
        variances = torch.ones(static_dim * len(windows))

        y = G.mlpg(means.numpy(), variances.numpy(), windows)
        y = torch.from_numpy(y)

        means = means.clone()
        means.requires_grad = True

        # mlpg
        y_hat = AF.mlpg(means, variances, windows)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())

        # Test backward pass
        nn.MSELoss()(y_hat, y).backward()

        # unit_variance_mlpg
        R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
        y_hat = AF.unit_variance_mlpg(R, means)
        assert np.allclose(y.data.numpy(), y_hat.data.numpy())
From r9y9/nnmnkwii on GitHub: perf/autograd_mlpg_perf.py
def benchmark_mlpg(static_dim=59, T=100, batch_size=10, use_cuda=True):
    if use_cuda and not torch.cuda.is_available():
        return

    windows = _get_windows_set()[-1]
    np.random.seed(1234)
    torch.manual_seed(1234)
    means = np.random.rand(T, static_dim * len(windows)).astype(np.float32)
    variances = np.ones(static_dim * len(windows))
    reshaped_means = G.reshape_means(means, static_dim)

    # Pseudo target
    y = G.mlpg(means, variances, windows).astype(np.float32)

    # Pack into variables
    means = Variable(torch.from_numpy(means), requires_grad=True)
    reshaped_means = Variable(
        torch.from_numpy(reshaped_means), requires_grad=True)
    y = Variable(torch.from_numpy(y), requires_grad=False)
    criterion = nn.MSELoss()

    # Case 1: MLPG
    since = time.time()
    for _ in range(batch_size):
        y_hat = AF.mlpg(means, torch.from_numpy(variances), windows)
        L = criterion(y_hat, y)
        assert np.allclose(y_hat.data.numpy(), y.data.numpy())
        L.backward()  # slow!
    elapsed_mlpg = time.time() - since
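The excerpt stops after the first case. As an illustrative sketch (not the verbatim script) of how the comparison could continue under the same setup, the matrix-based variant avoids the slow per-call MLPG backward:

    # Case 2: unit-variance MLPG via a precomputed matrix
    # (sketch only; names reuse the setup from the excerpt above)
    R = torch.from_numpy(G.unit_variance_mlpg_matrix(windows, T))
    since = time.time()
    for _ in range(batch_size):
        y_hat = AF.unit_variance_mlpg(R, reshaped_means)
        L = criterion(y_hat, y)
        L.backward()
    elapsed_unit_variance_mlpg = time.time() - since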
From r9y9/nnmnkwii on GitHub: nnmnkwii/baseline/gmm.py
        for t in range(T):
            m = optimum_mix[t]
            xx = np.linalg.solve(self.covarXX[m], src[t] - self.src_means[m])
            # Eq. (22)
            E[t] = self.tgt_means[m] + np.dot(self.covarYX[m], xx)

        # Compute D, Eq. (23)
        # Approximate the covariances by their diagonals so that MLPG can be
        # done efficiently in a dimension-wise manner
        D = np.empty((T, feature_dim))
        for t in range(T):
            m = optimum_mix[t]
            # Eq. (23), with approximating covariances as diagonals
            D[t] = np.diag(self.covarYY[m]) - np.diag(self.covarYX[m]) / \
                np.diag(self.covarXX[m]) * np.diag(self.covarXY[m])

        # Once we have the means and variances over frames, we can run MLPG
        return mlpg(E, D, self.windows)
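For reference, the Eq. (22) and Eq. (23) cited in the comments are the standard conditional-Gaussian mean and covariance for mixture component m, as used in GMM-based voice conversion (e.g. Toda et al.):

E_t = \mu_m^{(Y)} + \Sigma_m^{(YX)} \left( \Sigma_m^{(XX)} \right)^{-1} \left( x_t - \mu_m^{(X)} \right)

D_t = \Sigma_m^{(YY)} - \Sigma_m^{(YX)} \left( \Sigma_m^{(XX)} \right)^{-1} \Sigma_m^{(XY)}

The code approximates each covariance block by its diagonal, which turns the matrix expression for D_t into the element-wise computation above.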
From r9y9/nnmnkwii on GitHub: nnmnkwii/autograd/_impl/mlpg.py
    def forward(self, means):
        assert means.dim() == 2  # we cannot do MLPG on minibatch
        variances = self.variances
        self.save_for_backward(means)

        T, D = means.size()
        assert means.size() == variances.size()

        means_np = means.detach().numpy()
        variances_np = variances.detach().numpy()
        y = G.mlpg(means_np, variances_np, self.windows)
        y = torch.from_numpy(y.astype(np.float32))
        return y
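Note that forward converts its inputs to NumPy via detach().numpy() and calls G.mlpg there, so the parameter generation itself runs on the CPU, and the result is cast to float32 before being handed back to PyTorch.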