# The snippets below are test-method fragments; they assume the usual imports
# (torch, gpytorch, torch.optim as optim, from math import exp) plus the helpers and
# model classes (make_data, GPRegressionModel, ExactGPModel, ...) defined in the
# surrounding test modules.
def test_kissgp_gp_mean_abs_error(self):
    train_x, train_y, test_x, test_y = make_data()
    likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001)
    gp_model = GPRegressionModel(train_x, train_y, likelihood)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

    # Optimize the model
    gp_model.train()
    likelihood.train()
    optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
    optimizer.n_iter = 0
    with gpytorch.settings.debug(False):
        for _ in range(25):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.n_iter += 1
            optimizer.step()

        # Every model and likelihood parameter should have received a nonzero gradient
        for param in gp_model.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)
        for param in likelihood.parameters():
            self.assertTrue(param.grad is not None)
            self.assertGreater(param.grad.norm().item(), 0)

    # Test the model (`cuda` and `test_noise` are assumed to come from the enclosing
    # test fixture)
    gp_model = ExactGPModel(train_x, train_y, likelihood)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
    gp_model.covar_module.base_kernel.initialize(lengthscale=exp(1))
    gp_model.mean_module.initialize(constant=0)
    if cuda:
        gp_model.cuda()
        likelihood.cuda()

    # Find optimal model hyperparameters
    gp_model.train()
    likelihood.train()
    optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.15)
    for _ in range(50):
        optimizer.zero_grad()
        with gpytorch.settings.debug(False):
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.step()

    for param in gp_model.parameters():
        self.assertTrue(param.grad is not None)
        self.assertGreater(param.grad.norm().item(), 0)
    optimizer.step()

    with gpytorch.settings.fast_pred_var():
        # Test the model
        gp_model.eval()
        likelihood.eval()
        test_function_predictions = likelihood(gp_model(test_x), noise=test_noise)
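
# What follows is not part of the original snippets: a minimal, hedged sketch of the
# make_data() helper and the KISS-GP GPRegressionModel that the test above assumes.
# The data shapes match the 100-point noise vector used above; the grid size and
# kernel choices are illustrative guesses, not the test suite's actual definitions.
import math

import torch
import gpytorch


def make_data():
    train_x = torch.linspace(0, 1, 100)
    train_y = torch.sin(train_x * (2 * math.pi))
    test_x = torch.linspace(0, 1, 51)
    test_y = torch.sin(test_x * (2 * math.pi))
    return train_x, train_y, test_x, test_y


class GPRegressionModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        # KISS-GP: a base kernel interpolated onto a fixed grid (SKI)
        self.covar_module = gpytorch.kernels.GridInterpolationKernel(
            gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()),
            grid_size=50,
            num_dims=1,
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
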
def test_inv_matmul_multiple_vecs(self):
    mat = self._create_mat().detach().requires_grad_(True)
    mat_copy = mat.detach().clone().requires_grad_(True)
    mat_copy.register_hook(_ensure_symmetric_grad)
    vecs = torch.randn(*mat.shape[:-2], mat.size(-1), 4).detach().requires_grad_(True)
    vecs_copy = vecs.detach().clone().requires_grad_(True)

    # Forward
    with settings.terminate_cg_by_size(False):
        res = NonLazyTensor(mat).inv_matmul(vecs)
        actual = mat_copy.inverse().matmul(vecs_copy)
        self.assertAllClose(res, actual)

        # Backward
        grad_output = torch.randn_like(vecs)
        res.backward(gradient=grad_output)
        actual.backward(gradient=grad_output)
        self.assertAllClose(mat.grad, mat_copy.grad)
        self.assertAllClose(vecs.grad, vecs_copy.grad)
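
# Hedged sketch (not part of the original snippet): plausible versions of the helpers
# the test above relies on. The hook keeps the gradient of a symmetric matrix symmetric
# so the dense reference path matches the lazy-tensor path; _create_mat mirrors the
# matrix construction used in create_lazy_tensor further below.
import torch


def _ensure_symmetric_grad(grad):
    # Symmetrize the incoming gradient: 0.5 * (grad + grad^T)
    return grad.add(grad.transpose(-1, -2)).mul(0.5)


def _create_mat(self):
    mat = torch.randn(5, 6)
    return mat.matmul(mat.transpose(-1, -2))  # random symmetric PSD 5 x 5 matrix
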
if not torch.cuda.is_available():
    return
with least_used_cuda_device():
    train_x, test_x, train_y, _ = self._get_data(cuda=True)
    likelihood = GaussianLikelihood()
    gp_model = ExactGPModel(train_x, train_y, likelihood)
    gp_model.cuda()
    likelihood.cuda()

    # Compute posterior distribution
    gp_model.eval()
    likelihood.eval()
    with gpytorch.settings.fast_pred_var(False):
        with gpytorch.settings.skip_posterior_variances(True):
            mean_skip_var = gp_model(test_x).mean
        mean = gp_model(test_x).mean
        likelihood_mean = likelihood(gp_model(test_x)).mean
    self.assertTrue(torch.allclose(mean_skip_var, mean))
    self.assertTrue(torch.allclose(mean_skip_var, likelihood_mean))
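
# Hedged sketch (not from the original snippet): one plausible shape for the _get_data
# helper the CUDA fragment above unpacks. The toy sine target and sizes are illustrative.
import math

import torch


def _get_data(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    train_x = torch.linspace(0, 1, 11, device=device)
    train_y = torch.sin(train_x * (2 * math.pi))
    test_x = torch.linspace(0, 1, 51, device=device)
    test_y = torch.sin(test_x * (2 * math.pi))
    return train_x, test_x, train_y, test_y
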
def create_lazy_tensor(self, with_solves=False, with_logdet=False):
    mat = torch.randn(5, 6)
    mat = mat.matmul(mat.transpose(-1, -2))
    mat.requires_grad_(True)
    lazy_tensor = NonLazyTensor(mat)
    eager_rhs = torch.randn(5, 10).detach()
    if with_solves:
        with gpytorch.settings.num_trace_samples(1000 if with_logdet else 1):  # For inv_quad_logdet tests
            solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = CachedCGLazyTensor.precompute_terms(
                lazy_tensor, eager_rhs.detach(), logdet_terms=with_logdet
            )
            eager_rhss = [eager_rhs.detach(), eager_rhs[..., -2:-1].detach()]
            solves = [solve.detach(), solve[..., -2:-1].detach()]
    else:
        eager_rhss = [eager_rhs]
        solves = []
        probe_vecs = torch.tensor([], dtype=mat.dtype, device=mat.device)
        probe_vec_norms = torch.tensor([], dtype=mat.dtype, device=mat.device)
        probe_vec_solves = torch.tensor([], dtype=mat.dtype, device=mat.device)
        tmats = torch.tensor([], dtype=mat.dtype, device=mat.device)
    return CachedCGLazyTensor(
        lazy_tensor, eager_rhss, solves, probe_vecs, probe_vec_norms, probe_vec_solves, tmats
    )
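
# Hedged usage sketch (added, not from the original snippets): how a CachedCGLazyTensor
# like the one returned above is typically exercised. Solving against the right-hand side
# that was passed to precompute_terms should reuse the cached CG solve instead of
# re-running CG; all calls below are the same ones used in create_lazy_tensor.
mat = torch.randn(5, 6)
mat = mat.matmul(mat.transpose(-1, -2))
base = NonLazyTensor(mat)
rhs = torch.randn(5, 10)
solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = CachedCGLazyTensor.precompute_terms(
    base, rhs, logdet_terms=False
)
cached = CachedCGLazyTensor(base, [rhs], [solve], probe_vecs, probe_vec_norms, probe_vec_solves, tmats)
res = cached.inv_matmul(rhs)  # expected to be served from the cached solve
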
def forward(self, x):
    mean_x = self.mean_module(x)
    covar_x = self.covar_module(x)
    latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
    return latent_pred

def _get_test_posterior_batched(device, dtype=torch.float):
    mean = torch.zeros(3, 2, device=device, dtype=dtype)
    cov = torch.eye(2, device=device, dtype=dtype).repeat(3, 1, 1)
    mvn = MultivariateNormal(mean, cov)
    return GPyTorchPosterior(mvn)
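
# Hedged usage sketch (added): drawing samples from the batched posterior built above.
# GPyTorchPosterior wraps a gpytorch MultivariateNormal for BoTorch; the sample shape
# below is illustrative (for this 3-batch, 2-point, single-output posterior the draws
# typically come back as 4 x 3 x 2 x 1).
posterior = _get_test_posterior_batched(device=torch.device("cpu"))
samples = posterior.rsample(sample_shape=torch.Size([4]))
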
def forward(self, x):
    mean_x = self.mean_module(x)
    covar_x = self.covar_module(x)
    res = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
    return res


def forward(self, x):
    mean_x = self.mean_module(x)
    covar_x = self.covar_module(x)
    return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)


def forward(self, x):
    mean_x = self.mean_module(x)
    covar_x = self.covar_module(x)
    latent_pred = MultivariateNormal(mean_x, covar_x)
    return latent_pred
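
# Hedged usage note (added): in gpytorch, forward() is not called directly. Calling the
# model dispatches through the module machinery (ExactGP.__call__): in train mode it
# returns the prior that forward() builds, and in eval mode it returns the posterior
# conditioned on the training data. `model`, `likelihood`, `train_x`, and `test_x` are
# assumed to exist; this is illustrative only.
model.train()
likelihood.train()
prior_pred = model(train_x)  # the GP prior at the training inputs

model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    posterior_pred = likelihood(model(test_x))  # predictive posterior at the test inputs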