How to use the tensorly.reshape function in tensorly

To help you get started, we've selected a few tensorly.reshape examples based on popular ways the function is used in public projects.
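
Before the project snippets, here is a minimal sketch of tl.reshape itself. It assumes the default NumPy backend, and the shapes are purely illustrative:

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.arange(24.0).reshape(2, 3, 4))

# tl.reshape mirrors numpy.reshape but dispatches to the active backend
matrix = tl.reshape(tensor, (2, 12))       # merge the last two modes
column = tl.reshape(tensor, (-1, 1))       # -1 lets the backend infer the size
print(tl.shape(matrix), tl.shape(column))  # (2, 12) (24, 1)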

github tensorly/tensorly: tensorly/contrib/decomposition/mps_decomposition_cross.py
continue

        # Find the row of max norm
        max_row_idx = tl.argmax(rows_norms, axis=0)
        max_row = A[rest_of_rows[max_row_idx], :]

        # Compute the projection of max_row onto the other rows
        # the projection of a onto b is computed as: <a, b> / sqrt(|a|*|b|)
        projection = tl.dot(A_new, tl.transpose(max_row))
        normalization = tl.sqrt(rows_norms[max_row_idx] * rows_norms)
        # make sure the normalization vector has the same shape as the projection (otherwise this causes bugs with MXNet)
        normalization = tl.reshape(normalization, tl.shape(projection))
        projection = projection/normalization

        # Subtract the projection from A_new:  b <- b - a * projection
        A_new = A_new - A_new * tl.reshape(projection, (tl.shape(A_new)[0], 1))

        # Delete the selected row
        mask.pop(max_row_idx)
        A_new = A_new[mask,:]

        # update the row_idx and rest_of_rows
        row_idx[i] = rest_of_rows[max_row_idx]
        rest_of_rows = rest_of_rows[mask]
        i = i + 1

    row_idx = tl.tensor(row_idx, dtype=tl.int64)
    inverse = tl.solve(A[row_idx,:], tl.eye(tl.shape(A[row_idx,:])[0]))
    row_idx = tl.to_numpy(row_idx)

    return row_idx, inverse
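
Both tl.reshape calls above exist for broadcasting: a length-n vector has to become an (n, 1) column before it can scale the rows of an (n, m) matrix consistently across backends. A minimal sketch of the same pattern, assuming the NumPy backend and made-up shapes:

import numpy as np
import tensorly as tl

A = tl.tensor(np.random.rand(5, 3))
scale = tl.tensor(np.random.rand(5))  # one coefficient per row

# Reshaping the vector into a column makes the row-wise scaling
# unambiguous, which matters for stricter backends such as MXNet.
scaled = A * tl.reshape(scale, (tl.shape(A)[0], 1))
print(tl.shape(scaled))  # (5, 3)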
github tensorly/tensorly: tensorly/contrib/decomposition/mps_decomposition_cross.py
if k == 0:  # Is[k] will be empty
        idx = (slice(None, None, None),) + tuple(zip(*col_idx[k]))
    else:
        idx = [[] for i in range(tensor_order)]
        for lidx in row_idx[k]:
            for ridx in col_idx[k]:
                for j, jj in enumerate(lidx): idx[j].append(jj)
                for j, jj in enumerate(ridx): idx[len(lidx) + 1 + j].append(jj)
        idx[k] = slice(None, None, None)
        idx = tuple(idx)

    # Extract the core
    core = input_tensor[idx]
    # shape the core as a third-order tensor
    if k == 0:
        core = tl.reshape(core, (tensor_shape[k], rank[k], rank[k + 1]))
        core = tl.transpose(core, (1, 0, 2))
    else:
        core = tl.reshape(core, (rank[k], rank[k + 1], tensor_shape[k]))
        core = tl.transpose(core, (0, 2, 1))

    # merge r_k and n_k, get a matrix
    core = tl.reshape(core, (rank[k] * tensor_shape[k], rank[k + 1]))

    # Compute QR decomposition
    (Q, R) = tl.qr(core)

    # Maxvol
    (I, _) = maxvol(Q)

    # Retrieve indices in the folded tensor
    new_idx = [np.unravel_index(idx, [rank[k], tensor_shape[k]]) for idx in I]  # First retrieve idx in the folded core
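
Here tl.reshape first folds the extracted fibers into a third-order core and then merges two of its modes so that tl.qr can operate on an ordinary matrix. A minimal sketch of that mode-merging idiom, with illustrative ranks and assuming the NumPy backend:

import numpy as np
import tensorly as tl

r_k, n_k, r_next = 2, 4, 3  # illustrative TT ranks and mode size
core = tl.tensor(np.random.rand(r_k, n_k, r_next))

# Merging the first two modes matricizes the core into a
# (r_k * n_k, r_next) matrix, ready for the QR step.
matrix = tl.reshape(core, (r_k * n_k, r_next))
Q, R = tl.qr(matrix)
print(tl.shape(Q))  # (8, 3)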
github tensorly/tensorly: tensorly/decomposition/candecomp_parafac.py
if non_negative:
                numerator = tl.clip(mttkrp, a_min=epsilon, a_max=None)
                denominator = tl.dot(factors[mode], accum)
                denominator = tl.clip(denominator, a_min=epsilon, a_max=None)
                factor = factors[mode] * numerator / denominator
            else:
                factor = tl.transpose(tl.solve(tl.conj(tl.transpose(pseudo_inverse)),
                                      tl.transpose(mttkrp)))
            
            if normalize_factors:
                weights = tl.norm(factor, order=2, axis=0)
                weights = tl.where(tl.abs(weights) <= tl.eps(tensor.dtype), 
                                   tl.ones(tl.shape(weights), **tl.context(factors[0])),
                                   weights)
                factor = factor/(tl.reshape(weights, (1, -1)))

            factors[mode] = factor

        if tol:
            # ||tensor - rec||^2 = ||tensor||^2 + ||rec||^2 - 2*<tensor, rec>
            factors_norm = kruskal_norm((weights, factors))

            # mttkrp and factor for the last mode. This is equivalent to the
            # inner product <tensor, factorization>
            iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights)
            rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor
            rec_errors.append(rec_error)

            if iteration >= 1:
                if verbose:
                    print('reconstruction error={}, variation={}.'.format(
                        rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
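
The tl.reshape call in this excerpt handles normalization: the per-component weights form a length-rank vector, and reshaping it to (1, rank) lets the division broadcast over rows, rescaling each column of the factor matrix by its own norm. A minimal sketch of that pattern, assuming the NumPy backend:

import numpy as np
import tensorly as tl

factor = tl.tensor(np.random.rand(6, 3))    # 6 rows, rank 3
weights = tl.norm(factor, order=2, axis=0)  # one L2 norm per column

# (1, -1) turns the weights into a row vector, so the division
# broadcasts over rows and rescales each column to unit norm.
factor = factor / tl.reshape(weights, (1, -1))
print(tl.norm(factor, order=2, axis=0))  # ~[1. 1. 1.]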
github tensorly/tensorly: tensorly/contrib/decomposition/mps_decomposition_cross.py
fibers_list.append(fiber)

    if k == tensor_order:  # Is[k] will be empty
        idx = tuple(zip(*row_idx[k - 1])) + (slice(None, None, None),)
    else:
        idx = [[] for i in range(tensor_order)]
        for lidx in row_idx[k - 1]:
            for ridx in col_idx[k - 1]:
                for j, jj in enumerate(lidx): idx[j].append(jj)
                for j, jj in enumerate(ridx): idx[len(lidx) + 1 + j].append(jj)
        idx[k - 1] = slice(None, None, None)
        idx = tuple(idx)

    core = input_tensor[idx]
    # shape the core as a third-order tensor
    core = tl.reshape(core, (rank[k - 1], rank[k], tensor_shape[k - 1]))
    core = tl.transpose(core, (0, 2, 1))
    # merge n_{k-1} and r_k, get a matrix
    core = tl.reshape(core, (rank[k - 1], tensor_shape[k - 1] * rank[k]))
    core = tl.transpose(core)

    # Compute QR decomposition
    (Q, R) = tl.qr(core)
    # Maxvol
    (J, Q_inv) = maxvol(Q)
    Q_inv = tl.tensor(Q_inv)
    Q_skeleton = tl.dot(Q, Q_inv)

    # Retrieve indices in the folded tensor
    new_idx = [np.unravel_index(idx, [tensor_shape[k - 1], rank[k]]) for idx in J]  # First retrieve idx in the folded core
    next_col_idx = [(jc[0],) + col_idx[k - 1][jc[1]] for jc in new_idx]  # Then reconstruct the idx in the tensor

    return (next_col_idx, fibers_list, Q_skeleton)
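
The index bookkeeping at the end works because tl.reshape is a row-major (C-order) operation: a flat position in the matricized core maps back to a (mode, rank) pair through np.unravel_index with the same shape that was used for the reshape. A small sanity check of that correspondence, with illustrative sizes:

import numpy as np
import tensorly as tl

n, r = 4, 3
core = tl.tensor(np.arange(n * r).reshape(n, r))
flat = tl.reshape(core, (-1,))

j = 7  # a flat index as selected by maxvol
print(np.unravel_index(j, [n, r]))  # (2, 1)
print(flat[j] == core[2, 1])        # True: reshape and unravel_index agree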
github tensorly/tensorly: tensorly/mps_tensor.py
def mps_to_tensor(factors):
    """Re-assembles 'factors', which represent a tensor in MPS/TT format,
    into the corresponding full tensor.

    Parameters
    ----------
    factors: list of 3D-arrays
              MPS factors (known as core in TT terminology)

    Returns
    -------
    output_tensor: ndarray
                   tensor whose MPS/TT decomposition was given by 'factors'
    """
    full_shape = [f.shape[1] for f in factors]
    full_tensor = tl.reshape(factors[0], (full_shape[0], -1))

    for factor in factors[1:]:
        rank_prev, _, rank_next = factor.shape
        factor = tl.reshape(factor, (rank_prev, -1))
        full_tensor = tl.dot(full_tensor, factor)
        full_tensor = tl.reshape(full_tensor, (-1, rank_next))

    return tl.reshape(full_tensor, full_shape)
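
As a quick usage sketch, hand-built random TT/MPS cores (the shapes are illustrative, with boundary ranks of 1) can be passed to the function above, assuming the NumPy backend:

import numpy as np
import tensorly as tl

# Random cores for a (4, 5, 6) tensor with TT ranks (1, 2, 3, 1).
factors = [tl.tensor(np.random.rand(1, 4, 2)),
           tl.tensor(np.random.rand(2, 5, 3)),
           tl.tensor(np.random.rand(3, 6, 1))]

full = mps_to_tensor(factors)  # the function shown above
print(tl.shape(full))          # (4, 5, 6)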