How to use the gpytorch.likelihoods.GaussianLikelihood class in gpytorch

To help you get started, we've selected a few gpytorch examples based on popular ways GaussianLikelihood is used in public projects.
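For orientation, here is a minimal end-to-end sketch of the pattern the excerpts below all share. This is an illustrative example, not taken from any of the projects listed; the toy data and the kernel choice (an RBF kernel under a ScaleKernel) are assumptions.

import math
import torch
import gpytorch

# Toy data: a noisy sine wave (illustrative only).
train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x * (2 * math.pi)) + 0.2 * torch.randn(100)

class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))

# GaussianLikelihood models homoskedastic observation noise p(y | f(x)).
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(train_x, train_y, likelihood)

# Fit hyperparameters (noise, lengthscale, outputscale) by maximizing the
# exact marginal log likelihood; model.parameters() already includes the
# likelihood's parameters because ExactGP registers it as a submodule.
model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for _ in range(50):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)
    loss.backward()
    optimizer.step()

# At prediction time, passing the model posterior through the likelihood
# adds the learned observation noise to the predictive distribution.
model.eval()
likelihood.eval()
with torch.no_grad():
    pred = likelihood(model(torch.linspace(0, 1, 20)))
    print(pred.mean, pred.variance)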


github cornellius-gp / gpytorch / test / examples / test_whitened_svgp_gp_regression.py
def test_regression_error(self, cuda=False, skip_logdet_forward=False, cholesky=False):
        train_x, train_y = train_data(cuda=cuda)
        likelihood = GaussianLikelihood()
        inducing_points = torch.linspace(0, 1, 25)
        model = SVGPRegressionModel(inducing_points=inducing_points, learn_locs=False)
        if cuda:
            likelihood.cuda()
            model.cuda()
        mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=len(train_y))

        # Find optimal model hyperparameters
        model.train()
        likelihood.train()
        optimizer = optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.01)

        _wrapped_cg = MagicMock(wraps=gpytorch.utils.linear_cg)
        with gpytorch.settings.max_cholesky_size(math.inf if cholesky else 0), \
                gpytorch.settings.skip_logdet_forward(skip_logdet_forward), \
                warnings.catch_warnings(record=True) as w, \
                patch("gpytorch.utils.linear_cg", new=_wrapped_cg):  # patch target inferred from the MagicMock above
            ...  # body of the with-block truncated in this excerpt
github cornellius-gp / gpytorch / test / examples / test_simple_gp_regression.py
def test_fantasy_updates(self, cuda=False):
        train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
        # We're manually going to set the hyperparameters to something they shouldn't be
        likelihood = GaussianLikelihood()
        gp_model = ExactGPModel(train_x, train_y, likelihood)
        mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
        gp_model.covar_module.base_kernel.initialize(lengthscale=exp(1))
        gp_model.mean_module.initialize(constant=0)
        likelihood.initialize(noise=exp(1))

        if cuda:
            gp_model.cuda()
            likelihood.cuda()

        # Find optimal model hyperparameters
        gp_model.train()
        likelihood.train()
        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.15)
        for _ in range(50):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)  # negative exact marginal log likelihood
            loss.backward()
            optimizer.step()
github cornellius-gp / gpytorch / test / examples / test_simple_gp_regression.py
def test_recursive_initialize(self, cuda=False):
        train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)

        likelihood_1 = GaussianLikelihood()
        gp_model_1 = ExactGPModel(train_x, train_y, likelihood_1)

        likelihood_2 = GaussianLikelihood()
        gp_model_2 = ExactGPModel(train_x, train_y, likelihood_2)

        gp_model_1.initialize(**{"likelihood.noise": 1e-2, "covar_module.base_kernel.lengthscale": 1e-1})
        gp_model_2.likelihood.initialize(noise=1e-2)
        gp_model_2.covar_module.base_kernel.initialize(lengthscale=1e-1)
        self.assertTrue(torch.equal(gp_model_1.likelihood.noise, gp_model_2.likelihood.noise))
        self.assertTrue(
            torch.equal(
                gp_model_1.covar_module.base_kernel.lengthscale, gp_model_2.covar_module.base_kernel.lengthscale
            )
        )
github cornellius-gp / gpytorch / test / examples / test_simple_gp_regression.py
def test_fantasy_updates_batch(self, cuda=False):
        train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
        # We're manually going to set the hyperparameters to something they shouldn't be
        likelihood = GaussianLikelihood()
        gp_model = ExactGPModel(train_x, train_y, likelihood)
        mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
        gp_model.covar_module.base_kernel.initialize(lengthscale=exp(1))
        gp_model.mean_module.initialize(constant=0)
        likelihood.initialize(noise=exp(1))

        if cuda:
            gp_model.cuda()
            likelihood.cuda()

        # Find optimal model hyperparameters
        gp_model.train()
        likelihood.train()
        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.15)
        for _ in range(50):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)  # negative exact marginal log likelihood
            loss.backward()
            optimizer.step()
github cornellius-gp / gpytorch / test / examples / test_model_list_gp_regression.py
def test_simple_model_list_gp_regression(self, cuda=False):
        train_x1 = torch.linspace(0, 0.95, 25) + 0.05 * torch.rand(25)
        train_x2 = torch.linspace(0, 0.95, 15) + 0.05 * torch.rand(15)

        train_y1 = torch.sin(train_x1 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x1)
        train_y2 = torch.cos(train_x2 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x2)

        likelihood1 = GaussianLikelihood()
        model1 = ExactGPModel(train_x1, train_y1, likelihood1)

        likelihood2 = GaussianLikelihood()
        model2 = ExactGPModel(train_x2, train_y2, likelihood2)

        model = IndependentModelList(model1, model2)
        likelihood = LikelihoodList(model1.likelihood, model2.likelihood)

        if cuda:
            model = model.cuda()

        model.train()
        likelihood.train()

        mll = SumMarginalLogLikelihood(likelihood, model)
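The excerpt stops once the objective is built; a typical continuation looks like this (a hedged sketch: the optimizer choice, learning rate, and iteration count are assumptions, not the project's values):

optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
for _ in range(50):
    optimizer.zero_grad()
    # IndependentModelList evaluates each GP on its own training inputs
    output = model(*model.train_inputs)
    loss = -mll(output, model.train_targets)  # SumMarginalLogLikelihood adds up the per-model MLLs
    loss.backward()
    optimizer.step()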
github cornellius-gp / gpytorch / test / examples / test_batch_gp_regression.py
def test_train_on_single_set_test_on_batch(self):
        # We're manually going to set the hyperparameters to something they shouldn't be
        likelihood = GaussianLikelihood()
        gp_model = ExactGPModel(train_x1, train_y1, likelihood)
        mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Find optimal model hyperparameters
        gp_model.train()
        likelihood.train()
        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        optimizer.n_iter = 0
        for _ in range(75):
            optimizer.zero_grad()
            output = gp_model(train_x1)
            loss = -mll(output, train_y1).sum()
            loss.backward()
            optimizer.step()

            for param in gp_model.parameters():
                ...  # per-parameter assertions truncated in this excerpt
github cornellius-gp / gpytorch / test / models / test_exact_gp.py
def create_likelihood_and_labels(self):
        likelihood = gpytorch.likelihoods.GaussianLikelihood()
        labels = torch.randn(50) + 2
        return likelihood, labels
github robotlearn / pyrobolearn / pyrobolearn / models / gp / gp.py
def likelihood_prob(self, likelihood):
        r"""Set the likelihood probability density function p(y|f,x)."""
        if likelihood is None:
            likelihood = gpytorch.likelihoods.GaussianLikelihood()
        if not isinstance(likelihood, gpytorch.likelihoods.Likelihood):
            raise TypeError("Expecting the likelihood to be an instance of `gpytorch.likelihoods.Likelihood`, "
                            "got {} instead".format(type(likelihood)))
        self.model.likelihood = likelihood
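For context, a quick sketch of how a caller might exercise this setter. The usage is hypothetical: `gp` stands for an instance of the surrounding class, which the excerpt does not show, and the call style assumes a plain method rather than a property.

import gpytorch

gp.likelihood_prob(None)  # hypothetical caller; falls back to a default GaussianLikelihood()
gp.likelihood_prob(gpytorch.likelihoods.GaussianLikelihood())  # any Likelihood instance is accepted
try:
    gp.likelihood_prob("not a likelihood")  # anything else is rejected
except TypeError as err:
    print(err)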
github MasashiSode / MOBO / mobo / bayesopt / core.py
def _train_likelihood(self):

        for i_obj in range(self.n_objective_dimension):
            self.likelihood[i_obj] = \
                gpytorch.likelihoods.GaussianLikelihood()
            self.model[i_obj] = self.surrogate_model(
                self.train_x, self.train_y[:, i_obj], self.likelihood[i_obj])
            self.model[i_obj].train()
            self.likelihood[i_obj].train()

            # Use the Adam optimizer for likelihood optimization
            optimizer_likelihood = torch.optim.Adam([
                # Includes GaussianLikelihood parameters
                {'params': self.model[i_obj].parameters()},
            ], lr=0.1)

            # "Loss" for GPs - the marginal log likelihood
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(
                self.likelihood[i_obj], self.model[i_obj])

            loss_prev = 0.1
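The excerpt cuts off at the convergence bookkeeping (`loss_prev = 0.1`). Below is a hedged reconstruction of the loop such bookkeeping typically feeds, reusing the names defined above; the iteration cap, tolerance, and loop body are illustrative, not the MOBO project's actual code.

            max_iter, tol = 500, 1e-4  # illustrative values, not from the MOBO source
            for _ in range(max_iter):
                optimizer_likelihood.zero_grad()
                output = self.model[i_obj](self.train_x)
                loss = -mll(output, self.train_y[:, i_obj])
                loss.backward()
                optimizer_likelihood.step()
                # stop once the improvement falls below tolerance
                if abs(loss_prev - loss.item()) < tol:
                    break
                loss_prev = loss.item()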