How to use the thewalrus.quantum.Amat function in thewalrus

To help you get started, we've selected a few thewalrus examples based on popular ways thewalrus.quantum.Amat is used in public projects.

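Before looking at the project snippets, here is a minimal, self-contained sketch of the two ways Amat is typically called: directly on a covariance matrix, or on a precomputed Husimi Q matrix with cov_is_qmat=True (the pattern used in the samples.py snippets below). The squeezing value r and the hbar = 2, xxpp-ordered single-mode covariance matrix are illustrative assumptions, not code from the projects.

import numpy as np
from thewalrus.quantum import Amat, Qmat

hbar = 2
r = 0.5  # illustrative squeezing parameter

# xxpp-ordered covariance matrix of a single-mode squeezed vacuum (hbar = 2)
cov = np.diag([np.exp(-2 * r), np.exp(2 * r)])

# Route 1: pass the covariance matrix directly
A_direct = Amat(cov, hbar=hbar)

# Route 2: precompute the Husimi Q matrix and set cov_is_qmat=True
Q = Qmat(cov, hbar=hbar)
A_from_q = Amat(Q, hbar=hbar, cov_is_qmat=True)

assert np.allclose(A_direct, A_from_q)
# For a squeezed vacuum the nonzero entries of A have magnitude tanh(r)
print(np.round(np.abs(A_direct), 6), np.tanh(r))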

From XanaduAI/thewalrus, thewalrus/samples.py (view on GitHub)
    n1, n2 = cov.shape

    if n1 != n2:
        raise ValueError("Covariance matrix must be square.")

    nmodes = n1 // 2
    prev_prob = 1.0
    mu = np.zeros(n1)

    for k in range(nmodes):
        probs1 = np.zeros([2], dtype=np.float64)
        kk = np.arange(k + 1)
        _, V_red = reduced_gaussian(mu, cov, kk)

        Q = Qmat(V_red, hbar=hbar)
        A = Amat(Q, hbar=hbar, cov_is_qmat=True)
        O = Xmat(k + 1) @ A

        indices = result + [0]
        ind2 = indices + indices

        probs1[0] = tor(np.complex128(reduction(O, ind2))).real

        indices = result + [1]
        ind2 = indices + indices
        pref = np.sqrt(np.linalg.det(Q).real)
        probs1a = probs1 / pref

        probs2 = probs1a / prev_prob
        probs2[1] = 1.0 - probs2[0]
        probs1a[1] = probs2[1] * prev_prob
        probs3 = np.maximum(
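
In the snippet above, O = Xmat(k + 1) @ A reduces to I - Q^{-1} (X squared is the identity), and the torontonian of its reductions yields threshold-detector click probabilities after dividing by sqrt(det Q). A hedged single-mode check, assuming tor is importable from the top-level thewalrus namespace and writing the 2 x 2 X matrix out by hand; for a squeezed vacuum the click probability should come out as 1 - 1/cosh(r):

import numpy as np
from thewalrus import tor
from thewalrus.quantum import Amat, Qmat

hbar = 2
r = 0.6  # illustrative squeezing parameter
cov = np.diag([np.exp(-2 * r), np.exp(2 * r)])  # single-mode squeezed vacuum

Q = Qmat(cov, hbar=hbar)
A = Amat(Q, hbar=hbar, cov_is_qmat=True)
X = np.array([[0.0, 1.0], [1.0, 0.0]])  # the single-mode X matrix
O = X @ A                               # equals I - inv(Q)

p_click = tor(np.complex128(O)).real / np.sqrt(np.linalg.det(Q).real)
print(p_click, 1 - 1 / np.cosh(r))      # the two numbers should agree
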
From XanaduAI/thewalrus, thewalrus/operations.py (view on GitHub)
        (array): Tensor containing the Fock representation of the Gaussian unitary
    """
    # Check the matrix is symplectic
    if check_symplectic:
        if not is_symplectic(S, rtol=rtol, atol=atol):
            raise ValueError("The matrix S is not symplectic")

    # And that S and alpha have compatible dimensions
    l, _ = S.shape
    if l // 2 != len(alpha):
        raise ValueError("The matrix S and the vector alpha do not have compatible dimensions")

    # Construct its Choi expansion and then the covariance matrix and A matrix of such pure state
    S_exp = choi_expand(S, r)
    cov = S_exp @ S_exp.T
    A = Amat(cov)

    # Because the state is pure then A = B \oplus B^*. We now extract B^* and follow the procedure
    # described in the paper cited above.
    n, _ = A.shape
    N = n // 2
    B = A[0:N, 0:N].conj()

    # Now we need to figure out the loops (cf. Eq. 111 of the reference above)
    l = len(alpha)
    alphat = np.array(list(alpha) + ([0] * l))
    zeta = alphat - B @ alphat.conj()

    # Finally, there are the prefactors (cf. Eq. 113 of the reference above).
    # Note that the factorials that are not included here from Eq. 113 are calculated
    # internally by hafnian_batched when the argument renorm is set to True
    pref_exp = -0.5 * alphat.conj() @ zeta
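
The comments above rely on the pure-state block structure A = B \oplus B^*: the off-diagonal N x N blocks of A vanish and the diagonal blocks are complex conjugates of each other, which is why B can be read off from the top-left block. A quick hedged check, again with an illustrative single-mode squeezed vacuum at hbar = 2 rather than code from the repository:

import numpy as np
from thewalrus.quantum import Amat

r = 0.3
# Single-mode squeezed vacuum (hbar = 2, xxpp ordering), pure by construction
cov = np.diag([np.exp(-2 * r), np.exp(2 * r)])

A = Amat(cov)
N = A.shape[0] // 2

# Pure-state structure: the off-diagonal blocks are zero ...
assert np.allclose(A[:N, N:], 0)
assert np.allclose(A[N:, :N], 0)
# ... and the diagonal blocks are complex conjugates of each other
assert np.allclose(A[N:, N:], A[:N, :N].conj())

B = A[:N, :N].conj()  # the same extraction as in the snippet above
print(B)              # its magnitude equals tanh(r) for this state
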
From XanaduAI/thewalrus, thewalrus/samples.py (view on GitHub)
    prev_prob = 1.0
    nmodes = N
    if mean is None:
        local_mu = np.zeros(2 * N)
    else:
        local_mu = mean
    A = Amat(Qmat(cov), hbar=hbar)

    for k in range(nmodes):
        probs1 = np.zeros([cutoff + 1], dtype=np.float64)
        kk = np.arange(k + 1)
        mu_red, V_red = reduced_gaussian(local_mu, cov, kk)

        if approx:
            Q = Qmat(V_red, hbar=hbar)
            A = Amat(Q, hbar=hbar, cov_is_qmat=True)

        for i in range(cutoff):
            indices = result + [i]
            ind2 = indices + indices
            if approx:
                factpref = np.prod(fac(indices))
                mat = reduction(A, ind2)
                probs1[i] = (
                    hafnian(np.abs(mat.real), approx=True, num_samples=approx_samples) / factpref
                )
            else:
                probs1[i] = density_matrix_element(
                    mu_red, V_red, indices, indices, include_prefactor=True, hbar=hbar
                ).real

        if approx:
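
In the exact (non-approx) branch above, each probs1[i] is a diagonal density-matrix element of the reduced Gaussian state. A minimal hedged sketch of that call, using an illustrative zero-mean single-mode squeezed vacuum at hbar = 2, for which the vacuum probability is 1/cosh(r) and odd photon numbers never occur:

import numpy as np
from thewalrus.quantum import density_matrix_element

r = 0.4
cov = np.diag([np.exp(-2 * r), np.exp(2 * r)])  # single-mode squeezed vacuum (hbar = 2)
mu = np.zeros(2)                                # zero displacement

# Diagonal elements <n|rho|n> are the photon-number probabilities
probs = [
    density_matrix_element(mu, cov, [n], [n], include_prefactor=True, hbar=2).real
    for n in range(4)
]
print(np.round(probs, 6))
print(1 / np.cosh(r))  # should match probs[0]; probs[1] and probs[3] should be ~0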