How to use the tensorly.norm function in tensorly

To help you get started, we’ve selected a few tensorly.norm examples, based on popular ways it is used in public projects.

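All of the examples below follow the same basic pattern: tl.norm(x, 2) computes the 2-norm (Frobenius norm) of a tensor, and dividing the norm of a residual by the norm of the original tensor gives a relative reconstruction error. Here is a minimal, self-contained sketch of that pattern on the default NumPy backend; the tensors are illustrative placeholders, not taken from the projects below.

import numpy as np
import tensorly as tl

# A small random tensor (tensorly uses the NumPy backend by default).
tensor = tl.tensor(np.random.random_sample((3, 4, 5)))

# A crude "approximation": the original tensor plus a little noise.
approx = tensor + 0.01 * tl.tensor(np.random.random_sample((3, 4, 5)))

# The recurring pattern: relative error = ||approx - tensor||_2 / ||tensor||_2
error = tl.norm(approx - tensor, 2) / tl.norm(tensor, 2)
print(error)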

github tensorly / tensorly / tensorly / contrib / test_mps_decomposition_cross.py
    maxvoleps = 1e-4
    tol = 1e-4
    n = 10
    d = 4
    rng = 1
    grid = getEquispaceGrid(d, rng, n)
    value = evaluateGrid(grid, func)
    value = tl.tensor(value)

    # Find MPS decomposition of the tensor
    rank = [1, 4, 4, 4, 1]
    factors = matrix_product_state_cross(value, rank, tol=tol)

    approx = mps_to_tensor(factors)
    error = tl.norm(approx-value,2)
    error /= tl.norm(value, 2)

    print(error)
    tl.assert_(error < 1e-5, 'norm 2 of reconstruction higher than tol')
github tensorly / tensorly / tensorly / contrib / test_mps_decomposition_cross.py
def test_matrix_product_state_cross_3():
    """ Test for matrix_product_state """
    rng = check_random_state(1234)

    ## Test 3
    tol = 10e-5
    tensor = tl.tensor(rng.random_sample([3, 3, 3]))
    factors = matrix_product_state_cross(tensor, (1, 3, 3, 1))
    reconstructed_tensor = mps_to_tensor(factors)
    error = tl.norm(reconstructed_tensor - tensor, 2)
    error /= tl.norm(tensor, 2)
    tl.assert_(error < tol,
              'norm 2 of reconstruction higher than tol')
github tensorly / tensorly / tensorly / decomposition / candecomp_parafac.py
        for mode in range(tl.ndim(tensor)):
            U, _, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                # factor = tl.tensor(np.zeros((U.shape[0], rank)), **tl.context(tensor))
                # factor[:, tensor.shape[mode]:] = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                # factor[:, :tensor.shape[mode]] = U
                random_part = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)
            
            factor = U[:, :rank]
            if non_negative:
                factor = tl.abs(factor)
            if normalize_factors:
                factor = factor / (tl.reshape(tl.norm(factor, axis=0), (1, -1)) + 1e-12)
            factors.append(factor)
        return factors

    raise ValueError('Initialization method "{}" not recognized'.format(init))
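Note the use of the axis argument in the normalization step above: tl.norm(factor, axis=0) returns one norm per column rather than a single scalar, which is what lets each column of the factor be rescaled to unit norm. A minimal sketch of that pattern, with an illustrative random factor:

import numpy as np
import tensorly as tl

# tl.norm with axis=0 computes one 2-norm per column of the matrix.
factor = tl.tensor(np.random.random_sample((6, 3)))
col_norms = tl.norm(factor, axis=0)

# Broadcasting the (1, 3) row of norms rescales every column to unit norm;
# the small epsilon guards against division by zero, as in the snippet above.
factor = factor / (tl.reshape(col_norms, (1, -1)) + 1e-12)
print(tl.norm(factor, axis=0))  # ~[1. 1. 1.]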
github tensorly / tensorly / tensorly / decomposition / _tucker.py
    try:
        svd_fun = tl.SVD_FUNS[svd]
    except KeyError:
        message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
        raise ValueError(message)

    # SVD init
    if init == 'svd':
        factors = []
        for index, mode in enumerate(modes):
            eigenvecs, _, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank[index])
            factors.append(eigenvecs)
    else:
        rng = check_random_state(random_state)
        core = tl.tensor(rng.random_sample(rank), **tl.context(tensor))
        factors = [tl.tensor(rng.random_sample((tl.shape(tensor)[mode], rank[index])), **tl.context(tensor)) for (index, mode) in enumerate(modes)]

    rec_errors = []
    norm_tensor = tl.norm(tensor, 2)

    for iteration in range(n_iter_max):
        for index, mode in enumerate(modes):
            core_approximation = multi_mode_dot(tensor, factors, modes=modes, skip=index, transpose=True)
            eigenvecs, _, _ = svd_fun(unfold(core_approximation, mode), n_eigenvecs=rank[index])
            factors[index] = eigenvecs

        core = multi_mode_dot(tensor, factors, modes=modes, transpose=True)

        # The factors are orthonormal and therefore do not affect the reconstructed tensor's norm
        rec_error = sqrt(abs(norm_tensor**2 - tl.norm(core, 2)**2)) / norm_tensor
        rec_errors.append(rec_error)

        if iteration > 1:
            if verbose:
                print('reconstruction error={}, variation={}.'.format(
                    rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
github tensorly / tensorly / tensorly / decomposition / candecomp_parafac.py
    Returns
    -------
    factors : ndarray list
            list of positive factors of the CP decomposition
            element `i` is of shape ``(tensor.shape[i], rank)``

    References
    ----------
    .. [3] Casey Battaglino, Grey Ballard and Tamara G. Kolda,
       "A Practical Randomized CP Tensor Decomposition",
    """
    rng = check_random_state(random_state)
    factors = initialize_factors(tensor, rank, init=init, svd=svd, random_state=random_state)
    rec_errors = []
    n_dims = tl.ndim(tensor)
    norm_tensor = tl.norm(tensor, 2)
    min_error = 0

    weights = tl.ones(rank, **tl.context(tensor))
    for iteration in range(n_iter_max):
        for mode in range(n_dims):
            kr_prod, indices_list = sample_khatri_rao(factors, n_samples, skip_matrix=mode, random_state=rng)
            indices_list = [i.tolist() for i in indices_list]
            # Keep all the elements of the currently considered mode
            indices_list.insert(mode, slice(None, None, None))
            # MXNet will not be happy if this is a list instead of a tuple
            indices_list = tuple(indices_list)
            if mode:
                sampled_unfolding = tensor[indices_list]
            else:
                sampled_unfolding = tl.transpose(tensor[indices_list])
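The indexing gymnastics above (inserting slice(None, None, None) for the current mode and converting the list to a tuple) amount to keeping every fiber along the current mode while sampling matched indices along all the other modes. A small NumPy-backend illustration of what that tuple indexing does; the index arrays here are made up for the example:

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random_sample((4, 5, 6)))

# Sample 3 fibers along mode 1: pick matched indices in modes 0 and 2,
# and keep everything along mode 1 via slice(None).
rows = np.array([0, 2, 3])
cols = np.array([1, 1, 5])
indices = (rows, slice(None), cols)  # must be a tuple, not a list

sampled = tensor[indices]
print(sampled.shape)  # (3, 5): one sampled fiber per (row, col) pair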
github tensorly / tensorly / tensorly / decomposition / _tucker.py
        core = tl.tensor(rng.random_sample(rank), **tl.context(tensor))
        factors = [tl.tensor(rng.random_sample((tl.shape(tensor)[mode], rank[index])), **tl.context(tensor)) for (index, mode) in enumerate(modes)]

    rec_errors = []
    norm_tensor = tl.norm(tensor, 2)

    for iteration in range(n_iter_max):
        for index, mode in enumerate(modes):
            core_approximation = multi_mode_dot(tensor, factors, modes=modes, skip=index, transpose=True)
            eigenvecs, _, _ = svd_fun(unfold(core_approximation, mode), n_eigenvecs=rank[index])
            factors[index] = eigenvecs

        core = multi_mode_dot(tensor, factors, modes=modes, transpose=True)

        # The factors are orthonormal and therefore do not affect the reconstructed tensor's norm
        rec_error = sqrt(abs(norm_tensor**2 - tl.norm(core, 2)**2)) / norm_tensor
        rec_errors.append(rec_error)

        if iteration > 1:
            if verbose:
                print('reconstruction error={}, variation={}.'.format(
                    rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

            if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
                if verbose:
                    print('converged in {} iterations.'.format(iteration))
                break

    return core, factors
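The error shortcut in the Tucker loop above relies on a standard identity: when the factors have orthonormal columns, the reconstruction is an orthogonal projection of the tensor, so ||tensor - rec||^2 = ||tensor||^2 - ||core||^2 and the full reconstruction never has to be formed. A quick numerical check of that identity, built here from SVDs of the unfoldings rather than from the decomposition routine itself:

import numpy as np
import tensorly as tl
from tensorly.tenalg import multi_mode_dot

rng = np.random.RandomState(0)
tensor = tl.tensor(rng.random_sample((4, 5, 6)))

# Orthonormal factors: the top-2 left singular vectors of each unfolding.
factors = [np.linalg.svd(tl.unfold(tensor, mode), full_matrices=False)[0][:, :2]
           for mode in range(3)]

core = multi_mode_dot(tensor, factors, transpose=True)
rec = multi_mode_dot(core, factors)

lhs = tl.norm(tensor - rec, 2) ** 2
rhs = tl.norm(tensor, 2) ** 2 - tl.norm(core, 2) ** 2
print(abs(lhs - rhs))  # ~0, up to floating-point error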
github tensorly / tensorly / tensorly / decomposition / candecomp_parafac.py
            if mask is not None:
                tensor = tensor*mask + tl.kruskal_to_tensor((None, factors), mask=1-mask)

            mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)

            if non_negative:
                numerator = tl.clip(mttkrp, a_min=epsilon, a_max=None)
                denominator = tl.dot(factors[mode], accum)
                denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
                factor = factors[mode] * numerator / denominator
            else:
                factor = tl.transpose(tl.solve(tl.conj(tl.transpose(pseudo_inverse)),
                                      tl.transpose(mttkrp)))
            
            if normalize_factors:
                weights = tl.norm(factor, order=2, axis=0)
                weights = tl.where(tl.abs(weights) <= tl.eps(tensor.dtype), 
                                   tl.ones(tl.shape(weights), **tl.context(factors[0])),
                                   weights)
                factor = factor/(tl.reshape(weights, (1, -1)))

            factors[mode] = factor

        if tol:
            # ||tensor - rec||^2 = ||tensor||^2 + ||rec||^2 - 2*<tensor, rec>
            factors_norm = kruskal_norm((weights, factors))

            # mttkrp and factor for the last mode. This is equivalent to the
            # inner product <tensor, reconstruction>
            iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights)
            rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor
            rec_errors.append(rec_error)
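The fast error computation above avoids forming the reconstructed tensor by expanding the squared norm of the difference. The identity itself is easy to verify numerically with tl.norm and tl.sum; the two random tensors below are stand-ins for the tensor and its reconstruction:

import numpy as np
import tensorly as tl

rng = np.random.RandomState(0)
tensor = tl.tensor(rng.random_sample((3, 4, 5)))
rec = tl.tensor(rng.random_sample((3, 4, 5)))

# ||tensor - rec||^2 == ||tensor||^2 + ||rec||^2 - 2*<tensor, rec>
lhs = tl.norm(tensor - rec, 2) ** 2
rhs = tl.norm(tensor, 2) ** 2 + tl.norm(rec, 2) ** 2 - 2 * tl.sum(tensor * rec)
print(abs(lhs - rhs))  # ~0, up to floating-point error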
github tensorly / tensorly / tensorly / decomposition / candecomp_parafac.py
    Returns
    -------
    factors : ndarray list
        List of initialized factors of the CP decomposition where element `i`
        is of shape (tensor.shape[i], rank)

    """
    rng = check_random_state(random_state)

    if init == 'random':
        factors = [tl.tensor(rng.random_sample((tensor.shape[i], rank)), **tl.context(tensor)) for i in range(tl.ndim(tensor))]
        if non_negative:
            factors = [tl.abs(f) for f in factors]
        if normalize_factors: 
            factors = [f/(tl.reshape(tl.norm(f, axis=0), (1, -1)) + 1e-12) for f in factors]
        return factors

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                    svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, _, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now