How to use the gpflow.config.default_float function in gpflow

To help you get started, we’ve selected a few gpflow.config.default_float examples, based on popular ways it is used in public projects.

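gpflow.config.default_float() returns the float dtype GPflow uses throughout (np.float64 unless you reconfigure it), which is handy whenever you build raw TensorFlow tensors that must match GPflow's internals. A minimal sketch of the basic pattern:

import numpy as np
import tensorflow as tf
from gpflow.config import default_float, set_default_float

# default_float() is np.float64 out of the box
X = tf.zeros((3, 1), dtype=default_float())

# switch the whole library to single precision, e.g. for GPU work
set_default_float(np.float32)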

github GPflow / GPflow / tests / test_conditionals.py
def test_whiten(Xdata, Xnew, kernel, mu, sqrt):
    """
    Make sure that predicting using the whitened representation is the
    same as the non-whitened one.
    """

    K = kernel(Xdata) + tf.eye(Nn, dtype=default_float()) * 1e-6
    L = tf.linalg.cholesky(K)
    V = tf.linalg.triangular_solve(L, mu, lower=True)
    mean1, var1 = conditional(Xnew, Xdata, kernel, mu)
    mean2, var2 = conditional(Xnew, Xdata, kernel, V, white=True)

    assert_allclose(mean1, mean2)
    assert_allclose(var1, var2)
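For context, the whitened representation stores v = L^{-1} mu where K = L L^T, so the original mean is recovered as mu = L v. A standalone NumPy sketch of that identity (illustrative, not part of the test):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
K = A @ A.T + 5 * np.eye(5)            # stand-in for a positive-definite kernel matrix
L = np.linalg.cholesky(K)
mu = rng.randn(5, 1)
V = np.linalg.solve(L, mu)             # whitened representation, V = L^{-1} mu
np.testing.assert_allclose(L @ V, mu)  # L V recovers the original mean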
github GPflow / GPflow / tests / test_likelihoods.py
        self.likelihood = likelihood
        self.Y = Y
        self.rtol = rtol
        self.atol = atol

    def __repr__(self):
        name = self.likelihood.__class__.__name__
        return f"{name}-rtol={self.rtol}-atol={self.atol}"


likelihood_setups = [
    LikelihoodSetup(Gaussian()),
    LikelihoodSetup(StudentT()),
    LikelihoodSetup(Beta(), Y=tf.random.uniform(Datum.Yshape, dtype=default_float())),
    LikelihoodSetup(Ordinal(np.array([-1, 1])), Y=tf.random.uniform(Datum.Yshape, 0, 3, dtype=default_int())),
    LikelihoodSetup(Poisson(invlink=tf.square), Y=tf.random.poisson(Datum.Yshape, 1.0, dtype=default_float())),
    LikelihoodSetup(Exponential(invlink=tf.square), Y=tf.random.uniform(Datum.Yshape, dtype=default_float())),
    LikelihoodSetup(Gamma(invlink=tf.square), Y=tf.random.uniform(Datum.Yshape, dtype=default_float())),
    LikelihoodSetup(Bernoulli(invlink=tf.sigmoid), Y=tf.random.uniform(Datum.Yshape, dtype=default_float())),
    pytest.param(LikelihoodSetup(MultiClass(2), Y=tf.argmax(Datum.Y, 1).numpy().reshape(-1, 1), rtol=1e-3, atol=1e-3),
                 marks=pytest.mark.skip),
]


def get_likelihood(likelihood_setup):
    if not isinstance(likelihood_setup, LikelihoodSetup):
        # pytest.param()
        likelihood_setup, = likelihood_setup.values
    return likelihood_setup.likelihood


def test_no_missing_likelihoods():
github GPflow / GPflow / tests / test_methods.py
"""
    This test makes sure that when the inducing points are the same as the data
    points, the sparse mcmc is the same as full mcmc
    """
    rng = Datum().rng
    X, Y = rng.randn(10, 1), rng.randn(10, 1)
    v_vals = rng.randn(10, 1)

    likelihood = gpflow.likelihoods.StudentT()
    model_1 = gpflow.models.GPMC(data=(X, Y), kernel=gpflow.kernels.Exponential(), likelihood=likelihood)
    model_2 = gpflow.models.SGPMC(data=(X, Y),
                                  kernel=gpflow.kernels.Exponential(),
                                  inducing_variable=X.copy(),
                                  likelihood=likelihood)
    model_1.V = tf.convert_to_tensor(v_vals, dtype=default_float())
    model_2.V = tf.convert_to_tensor(v_vals, dtype=default_float())
    model_1.kernel.lengthscale.assign(0.8)
    model_2.kernel.lengthscale.assign(0.8)
    model_1.kernel.variance.assign(4.2)
    model_2.kernel.variance.assign(4.2)

    assert_allclose(model_1.log_likelihood(), model_2.log_likelihood(), rtol=1e-5, atol=1e-5)
github GPflow / GPflow / tests / test_uncertain_conditional.py
def gen_q_sqrt(D_out, *shape):
    return tf.convert_to_tensor(np.array(
        [np.tril(rng.randn(*shape)) for _ in range(D_out)]
        ), dtype=default_float())
github GPflow / GPflow / gpflow / quadrature.py
def hermgauss(n: int):
    x, w = np.polynomial.hermite.hermgauss(n)
    x, w = x.astype(default_float()), w.astype(default_float())
    return x, w
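np.polynomial.hermite.hermgauss returns nodes and weights for the weight function exp(-x^2), i.e. integral f(x) exp(-x^2) dx ~ sum_i w_i f(x_i); to integrate against a Gaussian density you substitute x -> m + sqrt(2 v) x and divide by sqrt(pi), which is how quadrature code typically uses them. A quick sanity-check sketch:

import numpy as np

x, w = np.polynomial.hermite.hermgauss(20)

# the weights integrate exp(-x^2) exactly: sum(w) == sqrt(pi)
np.testing.assert_allclose(w.sum(), np.sqrt(np.pi))

# E[X^2] for X ~ N(0, 1) via the change of variables above
m, v = 0.0, 1.0
expectation = (w * (m + np.sqrt(2 * v) * x) ** 2).sum() / np.sqrt(np.pi)
np.testing.assert_allclose(expectation, 1.0)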
github GPflow / GPflow / gpflow / quadrature.py
    def eval_func(func):
        feval = func(mc_Xr, **Ys)
        feval = tf.reshape(feval, (S, N, -1))
        if logspace:
            log_S = tf.math.log(tf.cast(S, default_float()))
            return tf.reduce_logsumexp(feval, axis=0) - log_S  # [N, D]
        else:
            return tf.reduce_mean(feval, axis=0)
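The logspace branch computes log((1/S) * sum_s exp(f_s)) as logsumexp(f) - log S, the numerically stable way to average S Monte Carlo samples of a log-density. A small sketch of the equivalence (made-up values, not GPflow code):

import numpy as np

feval = np.log(np.array([0.1, 0.2, 0.4]))         # log f for S = 3 samples
S = feval.shape[0]

naive = np.log(np.exp(feval).mean())              # can overflow for large |feval|
stable = np.logaddexp.reduce(feval) - np.log(S)   # logsumexp - log S
np.testing.assert_allclose(naive, stable)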
github GPflow / GPflow / gpflow / models / vgp.py
which is:

            E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]

        with

            q(\mathbf f) = N(\mathbf f \,|\, \boldsymbol \mu, \boldsymbol \Sigma)

        """

        x_data, y_data = self.data
        # Get prior KL.
        KL = gauss_kl(self.q_mu, self.q_sqrt)

        # Get conditionals
        K = self.kernel(x_data) + tf.eye(self.num_data, dtype=default_float()) * default_jitter()
        L = tf.linalg.cholesky(K)
        fmean = tf.linalg.matmul(L, self.q_mu) + self.mean_function(x_data)  # [NN, ND] -> ND
        q_sqrt_dnn = tf.linalg.band_part(self.q_sqrt, -1, 0)  # [D, N, N]
        L_tiled = tf.tile(tf.expand_dims(L, 0), tf.stack([self.num_latent, 1, 1]))
        LTA = tf.linalg.matmul(L_tiled, q_sqrt_dnn)  # [D, N, N]
        fvar = tf.reduce_sum(tf.square(LTA), 2)

        fvar = tf.transpose(fvar)

        # Get variational expectations.
        var_exp = self.likelihood.variational_expectations(fmean, fvar, y_data)

        return tf.reduce_sum(var_exp) - KL
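The fvar computation relies on the identity diag(M M^T) = sum_j M_ij^2, applied per latent dimension to M = L q_sqrt: summing squares over the last axis of LTA yields the marginal variances without materialising the full N x N covariance. A standalone sketch of the identity:

import numpy as np

rng = np.random.RandomState(0)
M = rng.randn(4, 6)

full_diag = np.diag(M @ M.T)        # diagonal of the full product
cheap_diag = np.square(M).sum(-1)   # row-wise sum of squares
np.testing.assert_allclose(full_diag, cheap_diag)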
github GPflow / GPflow / gpflow / models / sgpr.py
    def upper_bound(self):
        x_data, y_data = self.data
        num_data = tf.cast(tf.shape(y_data)[0], default_float())

        Kdiag = self.kernel(x_data, full=False)
        kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
        kuf = Kuf(self.inducing_variable, self.kernel, x_data)

        I = tf.eye(tf.shape(kuu)[0], dtype=default_float())

        L = tf.linalg.cholesky(kuu)
        A = tf.linalg.triangular_solve(L, kuf, lower=True)
        AAT = tf.linalg.matmul(A, A, transpose_b=True)
        B = I + AAT / self.likelihood.variance
        LB = tf.linalg.cholesky(B)

        # Using the Trace bound, from Titsias' presentation
        c = tf.reduce_sum(Kdiag) - tf.reduce_sum(tf.square(A))
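Here sum_ij A_ij^2 = tr(A^T A) = tr(Kuf^T Kuu^{-1} Kuf) = tr(Q_ff), so c = tr(K_ff - Q_ff): the total variance the inducing points fail to explain. A small sketch checking that algebra (illustrative matrices only):

import numpy as np

rng = np.random.RandomState(0)
kuf = rng.randn(3, 8)
kuu = np.cov(rng.randn(3, 20)) + 1e-6 * np.eye(3)   # SPD stand-in for Kuu

L = np.linalg.cholesky(kuu)
A = np.linalg.solve(L, kuf)                          # A = L^{-1} Kuf

trace_Qff = np.trace(kuf.T @ np.linalg.solve(kuu, kuf))
np.testing.assert_allclose(np.square(A).sum(), trace_Qff)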
github GPflow / GPflow / gpflow / covariances / kuus.py
def Kuu_conv_patch(feat, kern, jitter=0.0):
    return kern.basekern.K(feat.Z) + jitter * tf.eye(len(feat), dtype=default_float())
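The jitter term is the standard trick for keeping the Cholesky factorisation stable when K(Z, Z) is close to singular, e.g. when two inducing patches coincide. A tiny sketch of why it matters:

import numpy as np

Z = np.array([[0.0], [0.0]])               # two identical inputs
K = np.exp(-0.5 * (Z - Z.T) ** 2)          # singular 2x2 RBF-style matrix

# np.linalg.cholesky(K) raises LinAlgError here; jitter restores positive definiteness
L = np.linalg.cholesky(K + 1e-6 * np.eye(2))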
github GPflow / GPflow / gpflow / models / sgpr.py
    def log_likelihood(self):
        """
        Construct a tensorflow function to compute the bound on the marginal
        likelihood. For a derivation of the terms in here, see the associated
        SGPR notebook.
        """
        x_data, y_data = self.data
        num_inducing = len(self.inducing_variable)
        num_data = tf.cast(tf.shape(y_data)[0], default_float())
        output_dim = tf.cast(tf.shape(y_data)[1], default_float())

        err = y_data - self.mean_function(x_data)
        Kdiag = self.kernel(x_data, full=False)
        kuf = Kuf(self.inducing_variable, self.kernel, x_data)
        kuu = Kuu(self.inducing_variable, self.kernel, jitter=default_jitter())
        L = tf.linalg.cholesky(kuu)
        sigma = tf.sqrt(self.likelihood.variance)

        # Compute intermediate matrices
        A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
        AAT = tf.linalg.matmul(A, A, transpose_b=True)
        B = AAT + tf.eye(num_inducing, dtype=default_float())
        LB = tf.linalg.cholesky(B)
        Aerr = tf.linalg.matmul(A, err)
        c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma