How to use the tensorly.transpose function in tensorly

To help you get started, we've selected a few tensorly.transpose examples based on popular ways it is used in public projects.
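If you have never called it before, here is a minimal sketch of the two call forms the excerpts below rely on (array contents and shapes are purely illustrative): with no second argument, tl.transpose transposes a matrix; with an explicit permutation, it reorders the modes of a higher-order tensor.

import numpy as np
import tensorly as tl

X = tl.tensor(np.arange(6).reshape((2, 3)))
Xt = tl.transpose(X)              # matrix transpose: shape (2, 3) -> (3, 2)

T = tl.tensor(np.random.random((2, 3, 4)))
Tp = tl.transpose(T, (1, 0, 2))   # explicit mode permutation: shape -> (3, 2, 4)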


github tensorly / tensorly / tensorly/decomposition/candecomp_parafac.py (View on Github)
    for i, factor in enumerate(factors):
        if i != mode:
            pseudo_inverse = pseudo_inverse * tl.dot(tl.conj(tl.transpose(factor)), factor)

    if mask is not None:
        tensor = tensor * mask + tl.kruskal_to_tensor((None, factors), mask=1 - mask)

    mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)

    if non_negative:
        numerator = tl.clip(mttkrp, a_min=epsilon, a_max=None)
        denominator = tl.dot(factors[mode], accum)
        denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
        factor = factors[mode] * numerator / denominator
    else:
        factor = tl.transpose(tl.solve(tl.conj(tl.transpose(pseudo_inverse)),
                                       tl.transpose(mttkrp)))

    if normalize_factors:
        weights = tl.norm(factor, order=2, axis=0)
        weights = tl.where(tl.abs(weights) <= tl.eps(tensor.dtype),
                           tl.ones(tl.shape(weights), **tl.context(factors[0])),
                           weights)
        factor = factor / tl.reshape(weights, (1, -1))

    factors[mode] = factor

if tol:
    # ||tensor - rec||^2 = ||tensor||^2 + ||rec||^2 - 2*<tensor, rec>
    factors_norm = kruskal_norm((weights, factors))

    # mttkrp and factor for the last mode. This is equivalent to the
    # inner product <tensor, factorization>
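The tl.transpose idiom to notice in the dense branch is the least-squares update: rather than explicitly inverting pseudo_inverse, the factor satisfying factor @ pseudo_inverse = mttkrp is obtained by solving the (conjugate-)transposed system. A standalone sketch of the same trick with random, well-conditioned data (names mirror the snippet; shapes are made up):

import numpy as np
import tensorly as tl

rng = np.random.default_rng(0)
pseudo_inverse = tl.tensor(rng.random((4, 4)) + 4 * np.eye(4))  # well conditioned
mttkrp = tl.tensor(rng.random((6, 4)))

# Solve A^H X = B^T, then transpose: yields `factor` with factor @ A = B
factor = tl.transpose(tl.solve(tl.conj(tl.transpose(pseudo_inverse)),
                               tl.transpose(mttkrp)))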
github tensorly / tensorly / tensorly/tenalg/contraction.py (View on Github)
if contraction_dims != [tl.shape(tensor2)[i] for i in modes2]:
    raise ValueError('Trying to contract tensors over modes of different sizes '
                     '(contracting modes of sizes {} and {})'.format(
                         contraction_dims, [tl.shape(tensor2)[i] for i in modes2]))
shared_dim = int(np.prod(contraction_dims))

modes1_free = [i for i in range(tl.ndim(tensor1)) if i not in modes1]
free_shape1 = [tl.shape(tensor1)[i] for i in modes1_free]

# Permute free modes to the front, contracted modes to the back, then flatten
tensor1 = tl.reshape(tl.transpose(tensor1, modes1_free + modes1),
                     (int(np.prod(free_shape1)), shared_dim))

modes2_free = [i for i in range(tl.ndim(tensor2)) if i not in modes2]
free_shape2 = [tl.shape(tensor2)[i] for i in modes2_free]

# Contracted modes first so the shared dimension lines up for the matrix product
tensor2 = tl.reshape(tl.transpose(tensor2, modes2 + modes2_free),
                     (shared_dim, int(np.prod(free_shape2))))

res = tl.dot(tensor1, tensor2)
return tl.reshape(res, tuple(free_shape1 + free_shape2))
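The pattern above reduces a general tensor contraction to a single matrix product: permute the contracted modes to one end with tl.transpose, flatten with tl.reshape, multiply, and restore the free shape. A small sketch of the same trick, contracting mode 1 of a third-order tensor against mode 0 of a matrix (shapes are illustrative):

import numpy as np
import tensorly as tl

t1 = tl.tensor(np.random.random((3, 4, 5)))
t2 = tl.tensor(np.random.random((4, 6)))

# Free modes of t1 first, contracted mode last, then flatten to (3*5, 4)
m1 = tl.reshape(tl.transpose(t1, [0, 2, 1]), (15, 4))
res = tl.reshape(tl.dot(m1, t2), (3, 5, 6))   # contraction result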
github tensorly / tensorly / tensorly/contrib/decomposition/mps_decomposition_cross.py (View on Github)
        col_idx[k - 2] = next_col_idx

        # Compute cores
        try:
            factor_new[k - 1] = tl.transpose(Q_skeleton)
            factor_new[k - 1] = tl.reshape(factor_new[k - 1], (rank[k - 1], tensor_shape[k - 1], rank[k]))
        except ValueError:
            # The rank should not be larger than the input tensor's size
            raise ValueError("The rank is too large compared to the size of the tensor. Try with a smaller rank.")

    # Add the last core
    idx = (slice(None, None, None),) + tuple(zip(*col_idx[0]))

    core = input_tensor[idx]
    core = tl.reshape(core, (tensor_shape[0], 1, rank[1]))
    core = tl.transpose(core, (1, 0, 2))

    factor_new[0] = core

    # end right-to-left step
    ################################################

    # check the error for the while-loop
    error = tl.norm(mps_to_tensor(factor_old) - mps_to_tensor(factor_new), 2)
    threshold = tol * tl.norm(mps_to_tensor(factor_new), 2)

# check convergence
if iter >= n_iter_max:
    raise ValueError('Maximum number of iterations reached.')
if tl.norm(mps_to_tensor(factor_old) - mps_to_tensor(factor_new), 2) > tol * tl.norm(mps_to_tensor(factor_new), 2):
    raise ValueError('Low Rank Approximation algorithm did not converge.')
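In this TT-cross routine, tl.transpose only reorders extracted cores into the conventional (rank_in, mode_size, rank_out) layout. A minimal sketch of that reordering (shapes are made up):

import numpy as np
import tensorly as tl

core = tl.tensor(np.random.random((10, 1, 3)))  # extracted as (n_0, 1, r_1)
core = tl.transpose(core, (1, 0, 2))            # reorder to (1, n_0, r_1)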
github tensorly / tensorly / tensorly/contrib/decomposition/mps_decomposition_cross.py (View on Github)
    for lidx in row_idx[k]:
        for ridx in col_idx[k]:
            for j, jj in enumerate(lidx):
                idx[j].append(jj)
            for j, jj in enumerate(ridx):
                idx[len(lidx) + 1 + j].append(jj)
    idx[k] = slice(None, None, None)
    idx = tuple(idx)

# Extract the core
core = input_tensor[idx]
# shape the core as an order-3 tensor
if k == 0:
    core = tl.reshape(core, (tensor_shape[k], rank[k], rank[k + 1]))
    core = tl.transpose(core, (1, 0, 2))
else:
    core = tl.reshape(core, (rank[k], rank[k + 1], tensor_shape[k]))
    core = tl.transpose(core, (0, 2, 1))

# merge r_k and n_k to get a matrix
core = tl.reshape(core, (rank[k] * tensor_shape[k], rank[k + 1]))

# Compute QR decomposition
(Q, R) = tl.qr(core)

# Maxvol
(I, _) = maxvol(Q)

# Retrieve indices in the folded tensor
new_idx = [np.unravel_index(idx, [rank[k], tensor_shape[k]]) for idx in I]  # First retrieve idx in the folded core
next_row_idx = [row_idx[k][ic[0]] + (ic[1],) for ic in new_idx]  # Then reconstruct the idx in the tensor

return (next_row_idx, fibers_list)
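Before the maxvol step, the core is transposed and flattened so the (rank, mode_size) pair indexes the rows of a matrix, which tl.qr then orthogonalises. A self-contained sketch of that flatten-and-factorise step (maxvol itself is omitted; shapes are illustrative):

import numpy as np
import tensorly as tl

core = tl.tensor(np.random.random((2, 5, 3)))  # (r_k, n_k, r_{k+1})
core = tl.reshape(core, (2 * 5, 3))            # merge r_k and n_k into rows
Q, R = tl.qr(core)                             # Q has orthonormal columns, shape (10, 3)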
github tensorly / tensorly / tensorly/decomposition/_tucker.py (View on Github)
    nn_factors = [tl.abs(f) for f in factors]
    nn_core = tl.abs(core)
else:
    rng = check_random_state(random_state)
    core = tl.tensor(rng.random_sample(rank) + 0.01, **tl.context(tensor))  # Check this
    factors = [tl.tensor(rng.random_sample(s), **tl.context(tensor)) for s in zip(tl.shape(tensor), rank)]
    nn_factors = [tl.abs(f) for f in factors]
    nn_core = tl.abs(core)

norm_tensor = tl.norm(tensor, 2)
rec_errors = []

for iteration in range(n_iter_max):
    for mode in range(tl.ndim(tensor)):
        B = tucker_to_tensor((nn_core, nn_factors), skip_factor=mode)
        B = tl.transpose(unfold(B, mode))

        numerator = tl.dot(unfold(tensor, mode), B)
        numerator = tl.clip(numerator, a_min=epsilon, a_max=None)
        denominator = tl.dot(nn_factors[mode], tl.dot(tl.transpose(B), B))
        denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
        nn_factors[mode] *= numerator / denominator

    numerator = tucker_to_tensor((tensor, nn_factors), transpose_factors=True)
    numerator = tl.clip(numerator, a_min=epsilon, a_max=None)
    for i, f in enumerate(nn_factors):
        if i:
            denominator = mode_dot(denominator, tl.dot(tl.transpose(f), f), i)
        else:
            denominator = mode_dot(nn_core, tl.dot(tl.transpose(f), f), i)
    denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
    nn_core *= numerator / denominator
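Here tl.transpose(unfold(B, mode)) turns the mode-wise unfolding into a tall matrix, so both the numerator unfold(tensor, mode) @ B and the Gram matrix B^T B are plain matrix products. A standalone sketch of the same shapes (the tensor and mode are made up):

import numpy as np
import tensorly as tl
from tensorly import unfold

tensor = tl.tensor(np.random.random((4, 5, 6)))
mode = 1
B = tl.transpose(unfold(tensor, mode))   # shape (4*6, 5): other modes x mode size
gram = tl.dot(tl.transpose(B), B)        # B^T B, as in the denominator above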
github tensorly / tensorly / tensorly/contrib/decomposition/mps_decomposition_cross.py (View on Github)
###############################################
# right-to-left step
right_to_left_fiberlist = []
# list col_idx: list (tensor_order-1) of lists of right indices
col_idx = [None] * tensor_order
col_idx[-1] = [()]
for k in range(tensor_order, 1, -1):
    (next_col_idx, fibers_list, Q_skeleton) = right_left_ttcross_step(input_tensor, k, rank, row_idx, col_idx)
    # update col indices
    right_to_left_fiberlist.extend(fibers_list)
    col_idx[k - 2] = next_col_idx

    # Compute cores
    try:
        factor_new[k - 1] = tl.transpose(Q_skeleton)
        factor_new[k - 1] = tl.reshape(factor_new[k - 1], (rank[k - 1], tensor_shape[k - 1], rank[k]))
    except ValueError:
        # The rank should not be larger than the input tensor's size
        raise ValueError("The rank is too large compared to the size of the tensor. Try with a smaller rank.")

# Add the last core
idx = (slice(None, None, None),) + tuple(zip(*col_idx[0]))

core = input_tensor[idx]
core = tl.reshape(core, (tensor_shape[0], 1, rank[1]))
core = tl.transpose(core, (1, 0, 2))

factor_new[0] = core

# end right-to-left step
################################################
github tensorly / tensorly / tensorly/decomposition/candecomp_parafac.py (View on Github)
norm_tensor = tl.norm(tensor, 2)
min_error = 0

weights = tl.ones(rank, **tl.context(tensor))
for iteration in range(n_iter_max):
    for mode in range(n_dims):
        kr_prod, indices_list = sample_khatri_rao(factors, n_samples, skip_matrix=mode, random_state=rng)
        indices_list = [i.tolist() for i in indices_list]
        # Keep all the elements of the currently considered mode
        indices_list.insert(mode, slice(None, None, None))
        # MXNet will not be happy if this is a list instead of a tuple
        indices_list = tuple(indices_list)
        if mode:
            sampled_unfolding = tensor[indices_list]
        else:
            sampled_unfolding = tl.transpose(tensor[indices_list])

        pseudo_inverse = tl.dot(tl.transpose(kr_prod), kr_prod)
        factor = tl.dot(tl.transpose(kr_prod), sampled_unfolding)
        factor = tl.transpose(tl.solve(pseudo_inverse, factor))
        factors[mode] = factor

    if max_stagnation or tol:
        rec_error = tl.norm(tensor - kruskal_to_tensor((weights, factors)), 2) / norm_tensor
        if not min_error or rec_error < min_error:
            min_error = rec_error
            stagnation = -1
        stagnation += 1

        rec_errors.append(rec_error)

        if iteration > 1:
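In this randomised update, only a subset of fibers is indexed out of the tensor, and for mode 0 tl.transpose re-orients the sample so its rows correspond to the kept mode, matching the orientation of the sampled Khatri-Rao factor. A small sketch with hypothetical indices (tensor and index values are made up):

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((4, 5, 6)))
# Keep all of mode 0, sample three (mode 1, mode 2) index pairs
indices = (slice(None, None, None), [0, 2, 4], [1, 1, 3])
sampled_unfolding = tl.transpose(tensor[indices])   # shape (3, 4)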