How to use the tensorly.base.unfold function in tensorly

To help you get started, we've selected a few tensorly.base.unfold examples, based on popular ways the function is used in public projects.
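Before the project snippets, here is a minimal sketch of what unfold actually computes, assuming the default NumPy backend: the mode-i unfolding rearranges a tensor into a matrix whose rows are indexed by mode i, and fold inverts it.

import numpy as np
import tensorly as tl

# A 3 x 4 x 2 tensor; unfold(X, mode) returns its mode-`mode` matricization.
X = tl.tensor(np.arange(24).reshape((3, 4, 2)))

print(tl.base.unfold(X, 0).shape)  # (3, 8)
print(tl.base.unfold(X, 1).shape)  # (4, 6)
print(tl.base.unfold(X, 2).shape)  # (2, 12)

# fold is the inverse operation: it restores the original tensor shape.
assert np.allclose(tl.base.fold(tl.base.unfold(X, 1), 1, X.shape), X)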


github tensorly / tensorly / tensorly/decomposition/robust_decomposition.py
    D = T.zeros_like(X, **T.context(X))  # low rank part
    E = T.zeros_like(X, **T.context(X))  # sparse part
    L_x = T.zeros_like(X, **T.context(X))  # Lagrangian variables for the (X - D - E - L_x/mu) term
    J = [T.zeros_like(X, **T.context(X)) for _ in range(T.ndim(X))] # Low-rank modes of X
    L = [T.zeros_like(X, **T.context(X)) for _ in range(T.ndim(X))] # Lagrangian variables for J

    # Norm of the reconstructions at each iteration
    rec_X = []
    rec_D = []

    mu = mu_init

    for iteration in range(n_iter_max):

        for i in range(T.ndim(X)):
            J[i] = fold(svd_thresholding(unfold(D, i) + unfold(L[i], i)/mu, reg_J/mu), i, X.shape)

        D = L_x/mu + X - E
        for i in range(T.ndim(X)):
            D += J[i] - L[i]/mu
        D /= (T.ndim(X) + 1)

        E = soft_thresholding(X - D + L_x/mu, mask*reg_E/mu)

        # Update the lagrangian multipliers
        for i in range(T.ndim(X)):
            L[i] += mu * (D - J[i])

        L_x += mu*(X - D - E)

        mu = min(mu*learning_rate, mu_max)
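The J[i] update above follows a common pattern: unfold the tensor along mode i, apply a matrix operation (here, singular value thresholding), and fold the result back into a tensor of the original shape. A minimal sketch of that round trip, assuming the NumPy backend:

import numpy as np
import tensorly as tl
from tensorly.base import unfold, fold

X = tl.tensor(np.random.random_sample((5, 6, 7)))

for i in range(tl.ndim(X)):
    M = unfold(X, i)             # 2D view of X along mode i
    # ... a matrix operation such as svd_thresholding would go here ...
    Y = fold(M, i, tl.shape(X))  # back to the original tensor shape
    assert np.allclose(Y, X)     # fold exactly inverts unfold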
github tensorly / tensorly / tensorly/tucker_tensor.py

def tucker_to_unfolded(tucker_tensor, mode=0, skip_factor=None, transpose_factors=False):
    """Unfolds a Tucker tensor into a matrix along the given mode.

    Parameters
    ----------
    tucker_tensor : tl.TuckerTensor or (core, factors)
        core tensor and list of factor matrices
    mode : int, optional, default is 0
        mode along which to unfold
    skip_factor : None or int, optional, default is None
        if not None, index of a matrix to skip
    transpose_factors : bool, optional, default is False
        if True, the matrices or vectors in the list are transposed

    Returns
    -------
    2D-array
        unfolded tensor
    """
    return unfold(tucker_to_tensor(tucker_tensor, skip_factor=skip_factor, transpose_factors=transpose_factors), mode)
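A small sketch of the equivalence this one-liner exposes, assuming a tensorly version (as in the snippet above) where tucker_to_tensor accepts a (core, factors) pair: unfolding the reconstructed tensor directly gives the same matrix as tucker_to_unfolded.

import numpy as np
import tensorly as tl
from tensorly.base import unfold
from tensorly.tucker_tensor import tucker_to_tensor, tucker_to_unfolded

core = tl.tensor(np.random.random_sample((2, 3, 4)))
factors = [tl.tensor(np.random.random_sample((dim, rank)))
           for dim, rank in zip((5, 6, 7), (2, 3, 4))]

direct = tucker_to_unfolded((core, factors), mode=0)     # shape (5, 42)
two_step = unfold(tucker_to_tensor((core, factors)), 0)  # same matrix
assert np.allclose(direct, two_step)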
github tensorly / tensorly / tensorly/contrib/sparse/core.py
import functools

from .backend import sparse_context
from ... import backend, base, kruskal_tensor, tucker_tensor, mps_tensor

def wrap(func):
    @functools.wraps(func, assigned=('__name__', '__qualname__',
                                     '__doc__', '__annotations__'))
    def inner(*args, **kwargs):
        with sparse_context():
            return func(*args, **kwargs)

    return inner

unfold = wrap(base.unfold)
fold = wrap(base.fold)
tensor_to_vec = wrap(base.tensor_to_vec)
vec_to_tensor = wrap(base.vec_to_tensor)
partial_unfold = wrap(base.partial_unfold)
partial_fold = wrap(base.partial_fold)
partial_tensor_to_vec = wrap(base.partial_tensor_to_vec)
partial_vec_to_tensor = wrap(base.partial_vec_to_tensor)
kruskal_to_tensor = wrap(kruskal_tensor.kruskal_to_tensor)
kruskal_to_unfolded = wrap(kruskal_tensor.kruskal_to_unfolded)
kruskal_to_vec = wrap(kruskal_tensor.kruskal_to_vec)
tucker_to_tensor = wrap(tucker_tensor.tucker_to_tensor)
tucker_to_unfolded = wrap(tucker_tensor.tucker_to_unfolded)
tucker_to_vec = wrap(tucker_tensor.tucker_to_vec)
mps_to_tensor = wrap(mps_tensor.mps_to_tensor)
mps_to_unfolded = wrap(mps_tensor.mps_to_unfolded)
mps_to_vec = wrap(mps_tensor.mps_to_vec)
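The wrap helper above is a plain decorator pattern: each dense function is re-exported so that every call runs inside sparse_context(). A generic sketch of the same pattern, using a hypothetical context manager and function (timing_context and double are illustrative names, not tensorly API):

import functools
from contextlib import contextmanager

@contextmanager
def timing_context():
    # Hypothetical stand-in for tensorly's sparse_context().
    print("entering context")
    try:
        yield
    finally:
        print("leaving context")

def wrap(func):
    @functools.wraps(func, assigned=('__name__', '__qualname__',
                                     '__doc__', '__annotations__'))
    def inner(*args, **kwargs):
        with timing_context():
            return func(*args, **kwargs)
    return inner

def double(x):
    """Toy function standing in for base.unfold and friends."""
    return x * 2

double = wrap(double)   # same re-export style as `unfold = wrap(base.unfold)`
print(double(21))       # runs inside the context, prints 42
print(double.__doc__)   # metadata preserved by functools.wraps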
github snudatalab / KegNet / src/kegnet/utils/tucker.py
def choose_ranks(weight, ranks):
    """
    Choose the target ranks.
    """
    out_channels, in_channels, _, _ = weight.shape
    if ranks == 'evbmf':
        unfold_0 = tl.base.unfold(weight, 0)
        unfold_1 = tl.base.unfold(weight, 1)
        _, diag_0, _, _ = vbmf.EVBMF(unfold_0)
        _, diag_1, _, _ = vbmf.EVBMF(unfold_1)
        out_rank = diag_0.shape[0]
        in_rank = diag_1.shape[1]
    elif isinstance(ranks, float):
        out_rank = int(out_channels * ranks)
        in_rank = int(in_channels * ranks)
    elif isinstance(ranks, tuple):
        in_rank, out_rank = ranks
    else:
        raise ValueError(ranks)
    return out_rank, in_rank
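A quick usage sketch for the two non-EVBMF branches of choose_ranks above, assuming a hypothetical conv weight of shape (64, 32, 3, 3); only the shape matters here:

import numpy as np

# Hypothetical weight; only its shape is used by choose_ranks.
weight = np.zeros((64, 32, 3, 3))     # (out_channels, in_channels, kh, kw)

print(choose_ranks(weight, 0.5))      # float ratio -> (32, 16)
print(choose_ranks(weight, (8, 16)))  # tuple is (in_rank, out_rank) -> (16, 8)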
github jacobgil / pytorch-tensor-decompositions / decompositions.py
def estimate_ranks(layer):
    """ Unfold the 2 modes of the Tensor the decomposition will 
    be performed on, and estimates the ranks of the matrices using VBMF 
    """

    weights = layer.weight.data
    unfold_0 = tl.base.unfold(weights, 0) 
    unfold_1 = tl.base.unfold(weights, 1)
    _, diag_0, _, _ = VBMF.EVBMF(unfold_0)
    _, diag_1, _, _ = VBMF.EVBMF(unfold_1)
    ranks = [diag_0.shape[0], diag_1.shape[1]]
    return ranks
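Why modes 0 and 1: for a 4D conv weight of shape (out_channels, in_channels, kh, kw), these two unfoldings put the output and input channel dimensions on the rows, which is what EVBMF needs to estimate a rank per channel dimension. A shape check with a hypothetical 64 x 32 x 3 x 3 weight:

import numpy as np
import tensorly as tl

# Hypothetical conv weight: (out_channels, in_channels, kh, kw).
W = tl.tensor(np.zeros((64, 32, 3, 3)))

print(tl.base.unfold(W, 0).shape)  # (64, 288): out_channels x (in * kh * kw)
print(tl.base.unfold(W, 1).shape)  # (32, 576): in_channels x (out * kh * kw)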
github tensorly / tensorly / tensorly/decomposition/candecomp_parafac.py
        if non_negative:
            factors = [tl.abs(f) for f in factors]
        if normalize_factors: 
            factors = [f/(tl.reshape(tl.norm(f, axis=0), (1, -1)) + 1e-12) for f in factors]
        return factors

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                    svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, _, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                # factor = tl.tensor(np.zeros((U.shape[0], rank)), **tl.context(tensor))
                # factor[:, tensor.shape[mode]:] = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                # factor[:, :tensor.shape[mode]] = U
                random_part = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)
            
            factor = U[:, :rank]
            if non_negative:
                factor = tl.abs(factor)
            if normalize_factors:
                factor = factor / (tl.reshape(tl.norm(factor, axis=0), (1, -1)) + 1e-12)
            factors.append(factor)
        return factors
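The loop above implements the 'svd' initialization for CP decomposition: each factor starts as the leading left singular vectors of the corresponding mode's unfolding. A minimal sketch of one pass of that loop, assuming tl.partial_svd (tensorly's default SVD) as svd_fun:

import numpy as np
import tensorly as tl
from tensorly.base import unfold

rank = 3
tensor = tl.tensor(np.random.random_sample((5, 6, 7)))

# Mode-0 factor: leading left singular vectors of the mode-0 unfolding.
U, _, _ = tl.partial_svd(unfold(tensor, 0), n_eigenvecs=rank)
factor_0 = U[:, :rank]
print(factor_0.shape)  # (5, 3): one column per component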
github larry0123du / Decompose-CNN / scripts/decomp.py
def tucker_rank(layer):
    W = layer.weight.data
    mode3 = tl.base.unfold(W, 0)
    mode4 = tl.base.unfold(W, 1)
    diag_0 = EVBMF(mode3)
    diag_1 = EVBMF(mode4)
    d1 = diag_0.shape[0]
    d2 = diag_1.shape[1]

    del mode3
    del mode4
    del diag_0
    del diag_1

    # round up to multiples of 16
    return [int(np.ceil(d1 / 16) * 16),
            int(np.ceil(d2 / 16) * 16)]
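A hedged usage sketch for tucker_rank above, assuming PyTorch, tensorly's 'pytorch' backend (so unfold accepts torch tensors), and the repo's own EVBMF in scope:

import torch.nn as nn
import tensorly as tl

tl.set_backend('pytorch')

# Hypothetical conv layer; tucker_rank reads layer.weight.data directly.
layer = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)
d1, d2 = tucker_rank(layer)  # both ranks rounded up to multiples of 16
print(d1, d2)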