How to use the lab.B.cholesky function in lab

To help you get started, we've selected a few lab examples based on popular ways lab.B.cholesky is used in public projects.
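
In the snippets below, `B` is the lab namespace (typically imported as `import lab as B`), and B.cholesky dispatches on the type of its argument. As a minimal, self-contained sketch of the basic call on a NumPy array (assuming lab and NumPy are installed; the variable names are illustrative):

import numpy as np
import lab as B

# Build a small symmetric positive-definite matrix.
x = np.random.randn(4, 4)
a = x @ x.T + 1e-6 * np.eye(4)

# For a NumPy input, B.cholesky returns the lower-triangular factor,
# matching np.linalg.cholesky.
chol = B.cholesky(a)
assert np.allclose(chol @ chol.T, a)
assert np.allclose(chol, np.linalg.cholesky(a))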

github wesselb / stheno / tests / test_matrix.py
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr1), lr1 + lr2)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr2), lr1 + lr1)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr1), lr2)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr2), lr1)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr1 + lr2), B.zeros(3, 3))

    # Second, test positive definiteness.
    lr1 = LowRank(left=lr1.left, middle=lr1.middle)
    lr2 = LowRank(left=lr2.left, middle=lr2.middle)

    B.cholesky(B.lr_diff(lr1, 0.999 * lr1))
    B.cholesky(B.lr_diff(lr1 + lr2, lr1))
    B.cholesky(B.lr_diff(lr1 + lr2, lr2))
    B.cholesky(B.lr_diff(lr1 + lr1 + lr2, lr1))
    B.cholesky(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr1))
    B.cholesky(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr2))
    B.cholesky(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr1 + 0.999 * lr2))

github wesselb / stheno / tests / test_matrix.py
def test_cholesky():
    a = np.random.randn(5, 5)
    a = a.T.dot(a)

    # Test `Dense`.
    allclose(np.linalg.cholesky(a), B.cholesky(a))

    # Test `Diagonal`.
    d = Diagonal(np.diag(a))
    allclose(np.linalg.cholesky(to_np(d)), B.cholesky(d))

    # Test `LowRank`.
    a = np.random.randn(2, 2)
    lr = LowRank(left=np.random.randn(5, 2), middle=a.dot(a.T))
    chol = to_np(B.cholesky(lr))
    #   The Cholesky here is not technically the Cholesky decomposition. Hence
    #   we test this slightly differently.
    allclose(chol.dot(chol.T), lr)
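
The comment in the test points at a useful fact: for a low-rank matrix A = U M U^T with positive-definite middle M, the factor F = U chol(M) satisfies F F^T = A even though F is neither square nor triangular, which is why the test checks the reconstructed product rather than the factor itself. A rough NumPy-only sketch (names are illustrative):

import numpy as np

left = np.random.randn(5, 2)                 # U
m = np.random.randn(2, 2)
middle = m @ m.T                             # positive-definite M
low_rank = left @ middle @ left.T            # A = U M U^T, rank 2

factor = left @ np.linalg.cholesky(middle)   # F = U chol(M), shape (5, 2)
assert np.allclose(factor @ factor.T, low_rank)
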
github wesselb / stheno / stheno / matrix.py
def logdet(a):
    a = matrix(a)
    if a.logdet is None:
        a.logdet = 2 * B.sum(B.log(B.diag(B.cholesky(a))))
    return a.logdet
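
This helper caches the log-determinant computed from the Cholesky factor: if a = L L^T, then det(a) = det(L)^2 and hence log det(a) = 2 * sum(log(diag(L))), which is exactly the cached expression. A quick NumPy check of the identity (illustrative only):

import numpy as np

x = np.random.randn(4, 4)
a = x @ x.T + 1e-6 * np.eye(4)

chol = np.linalg.cholesky(a)
logdet_via_chol = 2 * np.sum(np.log(np.diag(chol)))
assert np.allclose(logdet_via_chol, np.linalg.slogdet(a)[1])
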
github wesselb / stheno / stheno / kernel.py
def __init__(self, k_zi, k_zj, z, A, K_z):
        self.k_zi = k_zi
        self.k_zj = k_zj
        self.z = z
        self.A = A
        self.L = B.cholesky(convert(K_z, AbstractMatrix))

github wesselb / stheno / stheno / graph.py
# `e`.
        if isinstance(x, MultiInput):
            x_n = MultiInput(*(p(xi.get())
                               for p, xi in zip(self.e.kernel.ps, x.get())))
        else:
            x_n = x

        # Construct the noise kernel matrix.
        K_n = self.e.kernel(x_n)

        # The approximation can only handle diagonal noise matrices.
        if not isinstance(K_n, Diagonal):
            raise RuntimeError('Kernel matrix of noise must be diagonal.')

        # And construct the components for the inducing point approximation.
        L_z = B.cholesky(self._K_z)
        self._A = B.add(B.eye(self._K_z),
                        B.iqf(K_n, B.transpose(B.solve(L_z, K_zx))))
        y_bar = uprank(self.y) - self.e.mean(x_n) - self.graph.means[p_x](x)
        prod_y_bar = B.solve(L_z, B.iqf(K_n, B.transpose(K_zx), y_bar))

        # Compute the optimal mean.
        self._mu = B.add(self.graph.means[p_z](z),
                         B.iqf(self._A, B.solve(L_z, self._K_z), prod_y_bar))

        # Compute the ELBO.
        # NOTE: The calculation of `trace_part` asserts that `K_n` is diagonal.
        #       The rest, however, is completely generic.
        trace_part = B.ratio(Diagonal(self.graph.kernels[p_x].elwise(x)[:, 0]) -
                             Diagonal(B.iqf_diag(self._K_z, K_zx)), K_n)
        det_part = B.logdet(2 * B.pi * K_n) + B.logdet(self._A)
        iqf_part = B.iqf(K_n, y_bar)[0, 0] - B.iqf(self._A, prod_y_bar)[0, 0]

github wesselb / stheno / stheno / matrix.py
def qf(a, b, c):
    if b is c:
        return B.qf(a, b)
    chol = B.cholesky(matrix(a))
    return B.matmul(B.trisolve(chol, b), B.trisolve(chol, c), tr_a=True)
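
The triangular solves work because, with a = L L^T, the quadratic form transpose(b) inv(a) c equals transpose(inv(L) b) (inv(L) c), so no explicit inverse is ever formed. A standalone NumPy/SciPy sketch of the same identity (SciPy's solve_triangular stands in for B.trisolve here; names are illustrative):

import numpy as np
from scipy.linalg import solve_triangular

x = np.random.randn(4, 4)
a = x @ x.T + 1e-6 * np.eye(4)
b = np.random.randn(4, 2)
c = np.random.randn(4, 3)

chol = np.linalg.cholesky(a)
left = solve_triangular(chol, b, lower=True)    # inv(L) b
right = solve_triangular(chol, c, lower=True)   # inv(L) c
assert np.allclose(left.T @ right, b.T @ np.linalg.solve(a, c))
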
github wesselb / stheno / stheno / matrix.py
def sample(a, num=1):
    """Sample from covariance matrices.

    Args:
        a (tensor): Covariance matrix to sample from.
        num (int): Number of samples.

    Returns:
        tensor: Samples as rank 2 column vectors.
    """
    # Convert integer data types to floats.
    dtype = float if B.issubdtype(B.dtype(a), np.integer) else B.dtype(a)

    # Perform sampling operation.
    chol = B.cholesky(matrix(a))
    return B.matmul(chol, B.randn(dtype, B.shape(chol)[1], num))
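
The sampler uses the standard construction: if z ~ N(0, I) and a = L L^T, then L z ~ N(0, a), so multiplying the Cholesky factor by standard-normal noise yields samples with covariance a. A plain NumPy version of the same operation (illustrative, not the library's code path):

import numpy as np

x = np.random.randn(4, 4)
a = x @ x.T + 1e-6 * np.eye(4)
num = 1000

chol = np.linalg.cholesky(a)
samples = chol @ np.random.randn(chol.shape[1], num)  # each column ~ N(0, a)
# The sample covariance approaches `a` as `num` grows.
print(np.cov(samples))
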
github wesselb / stheno / stheno / matrix.py
def qf(a, b):
    """Compute the quadratic form `transpose(b) inv(a) c`.

    Args:
        a (tensor): Covariance matrix.
        b (tensor): `b`.
        c (tensor, optional): `c`. Defaults to `b`.

    Returns:
        :class:`.matrix.Dense`: Quadratic form.
    """
    prod = B.trisolve(B.cholesky(matrix(a)), b)
    return matrix(B.matmul(prod, prod, tr_a=True))

github wesselb / stheno / stheno / matrix.py
def qf_diag(a, b, c):
    a = matrix(a)
    if b is c:
        return B.qf_diag(a, b)
    left = B.trisolve(B.cholesky(a), b)
    right = B.trisolve(B.cholesky(a), c)
    return B.sum(left * right, axis=0)
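
Only the diagonal of transpose(b) inv(a) c is needed here, and after the two triangular solves that diagonal is the column-wise sum of the elementwise product of the solved factors, which is what the final line computes. A NumPy/SciPy check of the identity (illustrative names):

import numpy as np
from scipy.linalg import solve_triangular

x = np.random.randn(4, 4)
a = x @ x.T + 1e-6 * np.eye(4)
b = np.random.randn(4, 3)
c = np.random.randn(4, 3)

chol = np.linalg.cholesky(a)
left = solve_triangular(chol, b, lower=True)
right = solve_triangular(chol, c, lower=True)
assert np.allclose(np.sum(left * right, axis=0),
                   np.diag(b.T @ np.linalg.solve(a, c)))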