How to use the gpflow.models.GPR class in gpflow

To help you get started, we’ve selected a few gpflow.models.GPR examples based on popular ways it is used in public projects.

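Before the examples, here is a minimal sketch of the typical GPR workflow, assuming the GPflow 2.x API (several snippets below use the older 1.x API, where X and Y are passed positionally rather than as a data tuple); the data values are purely illustrative:

import gpflow
import numpy as np

# Toy 1-D regression data (illustrative only)
X = np.random.rand(20, 1)
Y = np.sin(3.0 * X) + 0.1 * np.random.randn(20, 1)

# Exact GP regression: the Gaussian likelihood is built in, so only the kernel is specified
model = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.SquaredExponential())

# Fit kernel and noise hyperparameters by maximizing the log marginal likelihood
gpflow.optimizers.Scipy().minimize(model.training_loss, model.trainable_variables)

# Predictive mean and variance of the latent function at new inputs
Xnew = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
mean, var = model.predict_f(Xnew)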

github GPflow / GPflow / tests / test_mean_functions.py
def _create_GPR_model_with_bias(X, Y, mean_function):
    return gpflow.models.GPR(
        (X, Y),
        mean_function=mean_function,
        kernel=gpflow.kernels.Bias(Datum.input_dim),
    )

github GPflow / GPflow / tests / test_optimizers.py
def test_other_XiTransform_VGP_vs_GPR(session_tf, xi_transform=XiSqrtMeanVar()):
    """
    With other transforms the solution is not given in a single step, but it should still give the same answer
    after a number of smaller steps. 
    """
    N, D = 3, 2
    X = np.random.randn(N, D)
    Y = np.random.randn(N, 1)
    kern = gpflow.kernels.RBF(D)
    lik_var = 0.1
    lik = gpflow.likelihoods.Gaussian()
    lik.variance = lik_var

    m_vgp = gpflow.models.VGP(X, Y, kern, lik)
    m_gpr = gpflow.models.GPR(X, Y, kern)
    m_gpr.likelihood.variance = lik_var

    m_vgp.set_trainable(False)
    m_vgp.q_mu.set_trainable(True)
    m_vgp.q_sqrt.set_trainable(True)
    NatGradOptimizer(0.01).minimize(m_vgp, [[m_vgp.q_mu, m_vgp.q_sqrt, xi_transform]], maxiter=500)

    assert_allclose(m_gpr.compute_log_likelihood(),
                    m_vgp.compute_log_likelihood(), atol=1e-4)
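
Why this variant needs many small steps (a reading of the docstring above, not additional source code): under the canonical mean/variance parameterization a single natural-gradient step of size 1 jumps straight to the exact Gaussian posterior, whereas other Xi transforms only follow that geometry locally, so the test runs 500 steps of size 0.01 before comparing log likelihoods.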

github GPflow / GPflow / _unsorted / _test_method_equivalence.py
def test_few_inducing_points(self):
        with self.test_context() as session:
            vfe = gpflow.models.SGPR(self.X, self.Y, gpflow.kernels.SquaredExponential(1), self.X[:10, :].copy())
            opt = gpflow.train.ScipyOptimizer()
            opt.minimize(vfe)

            full = gpflow.models.GPR(self.X, self.Y, gpflow.kernels.SquaredExponential(1))
            full.kernel.lengthscale = vfe.kernel.lengthscale.read_value()
            full.kernel.variance = vfe.kernel.variance.read_value()
            full.likelihood.variance = vfe.likelihood.variance.read_value()

            lml_upper = vfe.compute_upper_bound()
            lml_vfe = -session.run(vfe.objective)
            lml_full = -session.run(full.objective)

            self.assertTrue(lml_upper > lml_full > lml_vfe)
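
The assertion checks a known sandwich property: the SGPR/VFE objective lower-bounds the full GPR log marginal likelihood, while compute_upper_bound gives a corresponding upper bound, so lml_vfe <= lml_full <= lml_upper should hold, with strict inequalities expected when only a few inducing points are used.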

github GPflow / GPflow / tests / test_method_equivalence.py
def prepare(self):
        rng = np.random.RandomState(0)
        X = rng.rand(20, 1) * 10
        Y = np.sin(X) + 0.9 * np.cos(X * 1.6) + rng.randn(*X.shape) * 0.8
        Y = np.tile(Y, 2)  # two identical columns
        self.Xtest = rng.rand(10, 1) * 10

        m1 = gpflow.models.GPR(
            X, Y, kern=gpflow.kernels.RBF(1),
            mean_function=gpflow.mean_functions.Constant())
        m2 = gpflow.models.VGP(
            X, Y, gpflow.kernels.RBF(1), likelihood=gpflow.likelihoods.Gaussian(),
            mean_function=gpflow.mean_functions.Constant())
        m3 = gpflow.models.SVGP(
            X, Y, gpflow.kernels.RBF(1),
            likelihood=gpflow.likelihoods.Gaussian(),
            Z=X.copy(),
            q_diag=False,
            mean_function=gpflow.mean_functions.Constant())
        m3.feature.trainable = False
        m4 = gpflow.models.SVGP(
            X, Y, gpflow.kernels.RBF(1),
            likelihood=gpflow.likelihoods.Gaussian(),
            Z=X.copy(), q_diag=False, whiten=True,
            mean_function=gpflow.mean_functions.Constant())

github GPflow / GPflow / unsorted_tests / test_predict.py
def prepare(self):
        self.rng = np.random.RandomState(0)
        self.X = self.rng.randn(100, 2)
        self.Y = self.rng.randn(100, 1)
        self.kern = gpflow.kernels.Matern32(2) + gpflow.kernels.White(1)
        self.Xtest = self.rng.randn(10, 2)
        self.Ytest = self.rng.randn(10, 1)
        # make a Gaussian model
        return gpflow.models.GPR(self.X, self.Y, kern=self.kern)

github GPflow / GPflow / tests / test_profiling.py
def prepare(self):
        with gpflow.defer_build():
            X = np.random.rand(100, 1)
            Y = np.sin(X) + np.random.randn(*X.shape) * 0.01
            k = gpflow.kernels.RBF(1)
            return gpflow.models.GPR(X, Y, k)

github GPflow / GPflow / tests / test_optimizers.py
def test_VGP_vs_GPR(session_tf):
    """
    With a Gaussian likelihood the Gaussian variational (VGP) model should be equivalent to the exact 
     regression model (GPR) after a single nat grad step of size 1
    """
    N, D = 3, 2
    X = np.random.randn(N, D)
    Y = np.random.randn(N, 1)
    kern = gpflow.kernels.RBF(D)
    lik_var = 0.1
    lik = gpflow.likelihoods.Gaussian()
    lik.variance = lik_var

    m_vgp = gpflow.models.VGP(X, Y, kern, lik)
    m_gpr = gpflow.models.GPR(X, Y, kern)
    m_gpr.likelihood.variance = lik_var

    m_vgp.set_trainable(False)
    m_vgp.q_mu.set_trainable(True)
    m_vgp.q_sqrt.set_trainable(True)
    NatGradOptimizer(1.).minimize(m_vgp, [(m_vgp.q_mu, m_vgp.q_sqrt)], maxiter=1)

    assert_allclose(m_gpr.compute_log_likelihood(),
                    m_vgp.compute_log_likelihood(), atol=1e-4)
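
For intuition, the exact posterior that the single natural-gradient step should land on can be written down in closed form. Below is a hedged numpy sketch of the standard Gaussian-likelihood GP regression equations (the helper name exact_posterior is mine, not part of the test suite):

import numpy as np

def exact_posterior(K, y, noise_var):
    # Posterior over f at the training inputs for a Gaussian likelihood:
    #   mean = K (K + noise_var * I)^{-1} y
    #   cov  = K - K (K + noise_var * I)^{-1} K
    A = K + noise_var * np.eye(K.shape[0])
    mean = K @ np.linalg.solve(A, y)
    cov = K - K @ np.linalg.solve(A, K)
    return mean, cov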

github GPflow / GPflow / tests / test_model.py
def model():
    return gpflow.models.GPR(
        (Data.X, Data.Y),
        kernel=gpflow.kernels.SquaredExponential(lengthscale=Data.ls, variance=Data.var),
    )

github cmu-db / ottertune / server / analysis / gpr / gprc.py
#
# OtterTune - analysis/gprc.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
# Author: Dana Van Aken

from __future__ import absolute_import

import tensorflow as tf
from gpflow import settings
from gpflow.decors import autoflow, name_scope, params_as_tensors
from gpflow.models import GPR


class GPRC(GPR):

    def __init__(self, X, Y, kern, mean_function=None, **kwargs):
        super(GPRC, self).__init__(X, Y, kern, mean_function, **kwargs)
        self.cholesky = None
        self.alpha = None

    @autoflow()
    def _compute_cache(self):
        # Gram matrix with Gaussian observation noise added to the diagonal
        K = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance
        # Cache the Cholesky factor and the residuals solved against it so that
        # repeated predictions can reuse them instead of refactorizing K
        L = tf.cholesky(K, name='gp_cholesky')
        V = tf.matrix_triangular_solve(L, self.Y - self.mean_function(self.X), name='gp_alpha')
        return L, V

    def update_cache(self):
        self.cholesky, self.alpha = self._compute_cache()
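
The class caches L and V but the snippet does not show how they are consumed. As a hedged sketch of one way the cache could be used (the helper below and the name predict_mean_with_cache are assumptions, not part of the OtterTune source), the predictive mean follows from the identity Kxn^T (K + sigma^2 I)^{-1} (Y - m(X)) = (L^{-1} Kxn)^T V:

import numpy as np
from scipy.linalg import solve_triangular

def predict_mean_with_cache(model, Kxn):
    # Hypothetical helper: Kxn = k(X, Xnew) as a numpy array.
    # Reuses model.cholesky (L) and model.alpha (V = L^{-1}(Y - m(X)))
    # so prediction avoids refactorizing the N x N Gram matrix.
    model.update_cache()
    A = solve_triangular(model.cholesky, Kxn, lower=True)  # L^{-1} Kxn
    return A.T @ model.alpha  # add the mean function at Xnew separately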