How to use the gpytorch.settings module in gpytorch

To help you get started, we’ve selected a few gpytorch.settings examples, drawn from popular ways the library is used in public projects.
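gpytorch.settings is a module of context managers that control gpytorch's numerical behaviour: debug checks, fast predictive variances, conjugate-gradients and Lanczos budgets, trace-sample counts, and so on. The basic pattern is to wrap the relevant model calls in a with block; settings nest freely and only apply inside the block. Below is a minimal sketch using an illustrative exact-GP model in the style of the gpytorch regression tutorial (the model class and data are assumptions, not part of any of the projects quoted below).

import math
import torch
import gpytorch

# Illustrative training data and a standard exact GP model
train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x * (2 * math.pi)) + 0.1 * torch.randn(train_x.size(0))

class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))

likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

# Settings are ordinary context managers: here the debug checks are switched
# off for a single training step and restored when the block exits.
model.train()
likelihood.train()
with gpytorch.settings.debug(False):
    loss = -mll(model(train_x), train_y)
    loss.backward()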


From cornellius-gp/gpytorch: test/examples/test_kissgp_white_noise_regression.py
def test_kissgp_gp_mean_abs_error(self):
        train_x, train_y, test_x, test_y = make_data()
        likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001)
        gp_model = GPRegressionModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        # Optimize the model
        gp_model.train()
        likelihood.train()

        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
        optimizer.n_iter = 0
        with gpytorch.settings.debug(False):
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

            # Test the model
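The test above wraps its whole training loop in gpytorch.settings.debug(False), turning off gpytorch's input-validation checks for that block. Settings of this kind are feature flags that can also be queried, which is how the library decides whether to run the checks. A small sketch:

import gpytorch

print(gpytorch.settings.debug.on())        # True by default
with gpytorch.settings.debug(False):
    print(gpytorch.settings.debug.on())    # False inside the context
print(gpytorch.settings.debug.on())        # restored to True afterwards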
From cornellius-gp/gpytorch: test/examples/test_fixed_noise_fanatasy_updates.py
        gp_model = ExactGPModel(train_x, train_y, likelihood)
        mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
        gp_model.covar_module.base_kernel.initialize(lengthscale=exp(1))
        gp_model.mean_module.initialize(constant=0)

        if cuda:
            gp_model.cuda()
            likelihood.cuda()

        # Find optimal model hyperparameters
        gp_model.train()
        likelihood.train()
        optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.15)
        for _ in range(50):
            optimizer.zero_grad()
            with gpytorch.settings.debug(False):
                output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.step()

        for param in gp_model.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)
        optimizer.step()

        with gpytorch.settings.fast_pred_var():
            # Test the model
            gp_model.eval()
            likelihood.eval()
            test_function_predictions = likelihood(gp_model(test_x), noise=test_noise)
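This example combines settings.debug(False) around the training forward pass with settings.fast_pred_var() around prediction. fast_pred_var enables LOVE-style fast predictive (co)variance estimates and is usually paired with eval mode and torch.no_grad(). A sketch of the common prediction pattern, reusing the model and likelihood from the introductory sketch above:

test_x = torch.linspace(0, 1, 51)

model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    observed_pred = likelihood(model(test_x))
    mean = observed_pred.mean
    lower, upper = observed_pred.confidence_region()   # mean +/- 2 standard deviations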
From cornellius-gp/gpytorch: test/functions/test_inv_matmul.py
def test_inv_matmul_multiple_vecs(self):
        mat = self._create_mat().detach().requires_grad_(True)
        mat_copy = mat.detach().clone().requires_grad_(True)
        mat_copy.register_hook(_ensure_symmetric_grad)
        vecs = torch.randn(*mat.shape[:-2], mat.size(-1), 4).detach().requires_grad_(True)
        vecs_copy = vecs.detach().clone().requires_grad_(True)

        # Forward
        with settings.terminate_cg_by_size(False):
            res = NonLazyTensor(mat).inv_matmul(vecs)
            actual = mat_copy.inverse().matmul(vecs_copy)
            self.assertAllClose(res, actual)

            # Backward
            grad_output = torch.randn_like(vecs)
            res.backward(gradient=grad_output)
            actual.backward(gradient=grad_output)
            self.assertAllClose(mat.grad, mat_copy.grad)
            self.assertAllClose(vecs.grad, vecs_copy.grad)
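settings.terminate_cg_by_size controls whether the conjugate-gradients solver is cut off once the iteration count reaches the size of the matrix; the test switches it off so the solve runs under the usual tolerance-based stopping rule instead. A self-contained sketch of the same inv_matmul call (the matrix and right-hand side are made up for illustration, and max_cholesky_size(0) forces the iterative path rather than the dense Cholesky fallback):

import torch
import gpytorch
from gpytorch.lazy import NonLazyTensor

mat = torch.randn(50, 50)
mat = mat @ mat.transpose(-1, -2) + 50 * torch.eye(50)   # symmetric positive definite
rhs = torch.randn(50, 4)

with gpytorch.settings.terminate_cg_by_size(False), gpytorch.settings.max_cholesky_size(0):
    solve = NonLazyTensor(mat).inv_matmul(rhs)            # approximately mat.inverse() @ rhs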
From cornellius-gp/gpytorch: test/examples/test_simple_gp_regression.py
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, test_x, train_y, _ = self._get_data(cuda=True)
            likelihood = GaussianLikelihood()
            gp_model = ExactGPModel(train_x, train_y, likelihood)

            gp_model.cuda()
            likelihood.cuda()

            # Compute posterior distribution
            gp_model.eval()
            likelihood.eval()

            with gpytorch.settings.fast_pred_var(False):
                with gpytorch.settings.skip_posterior_variances(True):
                    mean_skip_var = gp_model(test_x).mean
                mean = gp_model(test_x).mean
                likelihood_mean = likelihood(gp_model(test_x)).mean
            self.assertTrue(torch.allclose(mean_skip_var, mean))
            self.assertTrue(torch.allclose(mean_skip_var, likelihood_mean))
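settings.fast_pred_var(False) forces the exact predictive-variance computation, and settings.skip_posterior_variances(True) skips posterior covariances entirely; the test verifies that the predictive mean is unchanged either way. When only means are needed, skipping the variances is considerably cheaper. A sketch (model, likelihood and test_x as in the sketches above):

model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.skip_posterior_variances(True):
    mean_only = model(test_x).mean    # posterior covariance is never formed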
From cornellius-gp/gpytorch: test/lazy/test_cached_cg_lazy_tensor.py
def create_lazy_tensor(self, with_solves=False, with_logdet=False):
        mat = torch.randn(5, 6)
        mat = mat.matmul(mat.transpose(-1, -2))
        mat.requires_grad_(True)

        lazy_tensor = NonLazyTensor(mat)
        eager_rhs = torch.randn(5, 10).detach()
        if with_solves:
            with gpytorch.settings.num_trace_samples(1000 if with_logdet else 1):  # For inv_quad_logdet tests
                solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = CachedCGLazyTensor.precompute_terms(
                    lazy_tensor, eager_rhs.detach(), logdet_terms=with_logdet
                )
                eager_rhss = [eager_rhs.detach(), eager_rhs[..., -2:-1].detach()]
                solves = [solve.detach(), solve[..., -2:-1].detach()]
        else:
            eager_rhss = [eager_rhs]
            solves = []
            probe_vecs = torch.tensor([], dtype=mat.dtype, device=mat.device)
            probe_vec_norms = torch.tensor([], dtype=mat.dtype, device=mat.device)
            probe_vec_solves = torch.tensor([], dtype=mat.dtype, device=mat.device)
            tmats = torch.tensor([], dtype=mat.dtype, device=mat.device)

        return CachedCGLazyTensor(lazy_tensor, eager_rhss, solves, probe_vecs, probe_vec_norms, probe_vec_solves, tmats)
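settings.num_trace_samples sets how many random probe vectors are used for stochastic trace and log-determinant estimates; the test raises it to 1000 when log-det terms are needed so the cached solves are accurate. The same knob applies to any inv_quad_logdet-style computation. A hedged sketch (the matrix is made up, and max_cholesky_size(0) forces the iterative path, since small matrices otherwise fall back to a dense Cholesky):

import torch
import gpytorch
from gpytorch.lazy import NonLazyTensor

mat = torch.randn(200, 200)
mat = mat @ mat.transpose(-1, -2) + 200 * torch.eye(200)
rhs = torch.randn(200, 3)

with gpytorch.settings.num_trace_samples(64), gpytorch.settings.max_cholesky_size(0):
    inv_quad, logdet = NonLazyTensor(mat).inv_quad_logdet(inv_quad_rhs=rhs, logdet=True)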
From cornellius-gp/gpytorch: gpytorch/kernels/kernel.py
x2_ = x2_.index_select(-1, self.active_dims)

        # Give x1_ and x2_ a last dimension, if necessary
        if x1_.ndimension() == 1:
            x1_ = x1_.unsqueeze(1)
        if x2_ is not None:
            if x2_.ndimension() == 1:
                x2_ = x2_.unsqueeze(1)
            if not x1_.size(-1) == x2_.size(-1):
                raise RuntimeError("x1_ and x2_ must have the same number of dimensions!")

        if x2_ is None:
            x2_ = x1_

        # Check that ard_num_dims matches the supplied number of dimensions
        if settings.debug.on():
            if self.ard_num_dims is not None and self.ard_num_dims != x1_.size(-1):
                raise RuntimeError(
                    "Expected the input to have {} dimensionality "
                    "(based on the ard_num_dims argument). Got {}.".format(self.ard_num_dims, x1_.size(-1))
                )

        if diag:
            res = super(Kernel, self).__call__(x1_, x2_, diag=True, last_dim_is_batch=last_dim_is_batch, **params)
            # Did this Kernel eat the diag option?
            # If it does not return a LazyEvaluatedKernelTensor, we can call diag on the output
            if not isinstance(res, LazyEvaluatedKernelTensor):
                if res.dim() == x1_.dim() and res.shape[-2:] == torch.Size((x1_.size(-2), x2_.size(-2))):
                    res = res.diag()
            return res

        else:
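The shape check above only runs while settings.debug is on, so its cost disappears under settings.debug(False). The same guard works in user code, for example in a custom kernel or module; a sketch with a hypothetical check:

import torch
import gpytorch

def validate_inputs(x):
    # Hypothetical user-side validation, skipped when debug mode is off
    if gpytorch.settings.debug.on():
        if not torch.isfinite(x).all():
            raise RuntimeError("Input contains non-finite values!")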
From cornellius-gp/gpytorch: gpytorch/variational/unwhitened_variational_strategy.py
        # Expand everything to the right size
        shapes = [mean_diff.shape[:-1], induc_data_covar.shape[:-1], induc_induc_covar.shape[:-1]]
        if variational_inducing_covar is not None:
            root_variational_covar = variational_inducing_covar.root_decomposition().root.evaluate()
            shapes.append(root_variational_covar.shape[:-1])
        shape = _mul_broadcast_shape(*shapes)
        mean_diff = mean_diff.expand(*shape, mean_diff.size(-1))
        induc_data_covar = induc_data_covar.expand(*shape, induc_data_covar.size(-1))
        induc_induc_covar = induc_induc_covar.expand(*shape, induc_induc_covar.size(-1))
        if variational_inducing_covar is not None:
            root_variational_covar = root_variational_covar.expand(*shape, root_variational_covar.size(-1))

        # Cache the CG results
        # For now: run variational inference without a preconditioner
        # The preconditioner screws things up for some reason
        with settings.max_preconditioner_size(0):
            # Cache the CG results
            if variational_inducing_covar is None:
                left_tensors = mean_diff
            else:
                left_tensors = torch.cat([mean_diff, root_variational_covar], -1)

            with torch.no_grad():
                eager_rhs = torch.cat([left_tensors, induc_data_covar], -1)
                solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = CachedCGLazyTensor.precompute_terms(
                    induc_induc_covar,
                    eager_rhs.detach(),
                    logdet_terms=(not cholesky),
                    include_tmats=(not settings.skip_logdet_forward.on() and not cholesky),
                )
                eager_rhss = [
                    eager_rhs.detach(),
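The variational strategy above disables the pivoted-Cholesky preconditioner by wrapping its CG cache computation in settings.max_preconditioner_size(0). The same switch is available from user code when the preconditioner is suspected of hurting a particular model; a sketch reusing the model, likelihood and mll from the introductory sketch:

model.train()
likelihood.train()
with gpytorch.settings.max_preconditioner_size(0):
    loss = -mll(model(train_x), train_y)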
From cornellius-gp/gpytorch: gpytorch/utils/trace.py
        tensor_cls = type(right_matrix)
        right_matmul_closure = right_matrix.matmul

    if left_matmul_closure is None:
        left_matmul_closure = _identity

    if right_matmul_closure is None:
        right_matmul_closure = _identity

    if size is None:
        raise RuntimeError('Size must be specified, since neither left_matmul_closure nor'
                           ' right_matmul_closure are Tensors/Variables')

    # Default num_samples, tensor_cls
    if num_samples is None:
        num_samples = settings.num_trace_samples.value()

    if tensor_cls is None:
        tensor_cls = torch.Tensor

    # Return A and B if we're using deterministic mode
    if not settings.num_trace_samples.value() or size < num_samples:
        eye = tensor_cls(size).fill_(1).diag()
        if use_vars:
            eye = Variable(eye)
        if dim_num is not None:
            eye = eye.expand(dim_num, size, size)
        return left_matmul_closure(eye), right_matmul_closure(eye)

    # Call appropriate estimator
    if estimator_type == 'mub':
        return mubs_trace_components(left_matmul_closure, right_matmul_closure, size, num_samples,
From cornellius-gp/gpytorch: gpytorch/lazy/lazy_tensor.py
def _root_decomposition_size(self):
        """
        This is the inner size of the root decomposition.
        This is primarily used to determine if it will be cheaper to compute a
        different root or not
        """
        return settings.max_root_decomposition_size.value()
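Besides acting as context managers, settings expose their current state programmatically: numeric settings through .value() and feature flags through .on(). That is how library code such as _root_decomposition_size above reads the active configuration. Sketch:

import gpytorch

print(gpytorch.settings.max_root_decomposition_size.value())      # library default
with gpytorch.settings.max_root_decomposition_size(40):
    print(gpytorch.settings.max_root_decomposition_size.value())  # 40 inside the block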
From cornellius-gp/gpytorch: gpytorch/functions/_inv_quad_log_det.py
        inv_quad_rhs = None
        if ctx.inv_quad:
            matrix_args = args[1:]
            inv_quad_rhs = args[0]
        else:
            matrix_args = args

        # Get closure for matmul
        lazy_tsr = ctx.representation_tree(*matrix_args)
        with torch.no_grad():
            preconditioner, precond_lt, logdet_correction = lazy_tsr._preconditioner()

        ctx.preconditioner = preconditioner

        if (probe_vectors is None or probe_vector_norms is None) and logdet:
            num_random_probes = settings.num_trace_samples.value()
            if preconditioner is None:
                if settings.deterministic_probes.on():
                    if settings.deterministic_probes.probe_vectors is None:
                        probe_vectors = torch.empty(matrix_shape[-1], num_random_probes, dtype=dtype, device=device)
                        probe_vectors.bernoulli_().mul_(2).add_(-1)
                        settings.deterministic_probes.probe_vectors = probe_vectors
                    else:
                        probe_vectors = settings.deterministic_probes.probe_vectors
                else:
                    probe_vectors = torch.empty(matrix_shape[-1], num_random_probes, dtype=dtype, device=device)
                    probe_vectors.bernoulli_().mul_(2).add_(-1)

                probe_vector_norms = torch.norm(probe_vectors, 2, dim=-2, keepdim=True)
                if batch_shape is not None:
                    probe_vectors = probe_vectors.expand(*batch_shape, matrix_shape[-1], num_random_probes)
                    probe_vector_norms = probe_vector_norms.expand(*batch_shape, 1, num_random_probes)
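settings.deterministic_probes, checked above, caches the random probe vectors on the setting itself so that successive marginal log likelihood evaluations reuse them; this removes the Monte Carlo noise from the stochastic log-determinant estimate, which helps line-search based optimizers. A sketch (same model and data as the introductory sketch; max_cholesky_size(0) forces the stochastic path):

model.train()
likelihood.train()
with gpytorch.settings.deterministic_probes(True), gpytorch.settings.max_cholesky_size(0):
    loss_a = -mll(model(train_x), train_y)
    loss_b = -mll(model(train_x), train_y)
    # loss_a and loss_b should now agree, since both evaluations reuse the
    # same cached probe vectors instead of drawing fresh ones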