for dtype in (torch.float, torch.double):
    # Non-batch case: 4 points x 3 tasks, 12-dim diagonal covariance.
    mean = torch.randn(4, 3, device=device, dtype=dtype)
    var = torch.randn(12, device=device, dtype=dtype).abs_()
    values = mean + 0.5
    diffs = (values - mean).view(-1)

    res = MultitaskMultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
    # Closed-form log-density of a 12-dim Gaussian with diagonal covariance.
    actual = -0.5 * (math.log(math.pi * 2) * 12 + var.log().sum() + (diffs / var * diffs).sum())
    self.assertLess((res - actual).div(res).abs().item(), 1e-2)

    # Batched case: a batch of 3 distributions.
    mean = torch.randn(3, 4, 3, device=device, dtype=dtype)
    var = torch.randn(3, 12, device=device, dtype=dtype).abs_()
    values = mean + 0.5
    diffs = (values - mean).view(3, -1)

    res = MultitaskMultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
    actual = -0.5 * (math.log(math.pi * 2) * 12 + var.log().sum(-1) + (diffs / var * diffs).sum(-1))
    self.assertLess((res - actual).div(res).abs().norm(), 1e-2)
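For reference, the `actual` expression above is the closed-form log-density of an n-dimensional Gaussian with diagonal covariance. A minimal sketch, assuming only torch, that reproduces the same quantity by summing independent univariate normal log-probabilities:

import torch

mean = torch.randn(12)
var = torch.randn(12).abs_()
values = mean + 0.5

# log p(x) = -0.5 * (n*log(2*pi) + sum(log var) + sum((x - mean)^2 / var))
# equals the sum of n independent univariate normal log-densities.
reference = torch.distributions.Normal(mean, var.sqrt()).log_prob(values).sum()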
def create_lazy_tensor(self):
    diag = torch.randn(6, 3, 5).pow_(2)
    diag.requires_grad_(True)
    return DiagLazyTensor(diag)

lkhd = FixedNoiseGaussianLikelihood(noise=noise)
# test basics
self.assertIsInstance(lkhd.noise_covar, FixedGaussianNoise)
self.assertTrue(torch.equal(noise, lkhd.noise))
new_noise = 0.1 + torch.rand(4, device=device, dtype=dtype)
lkhd.noise = new_noise
self.assertTrue(torch.equal(lkhd.noise, new_noise))
# test __call__
mean = torch.zeros(4, device=device, dtype=dtype)
covar = DiagLazyTensor(torch.ones(4, device=device, dtype=dtype))
mvn = MultivariateNormal(mean, covar)
out = lkhd(mvn)
self.assertTrue(torch.allclose(out.variance, 1 + new_noise))
# a dimension mismatch should raise a warning rather than an error
mean = torch.zeros(5, device=device, dtype=dtype)
covar = DiagLazyTensor(torch.ones(5, device=device, dtype=dtype))
mvn = MultivariateNormal(mean, covar)
with self.assertWarns(UserWarning):
    lkhd(mvn)
# test __call__ w/ observation noise
obs_noise = 0.1 + torch.rand(5, device=device, dtype=dtype)
out = lkhd(mvn, noise=obs_noise)
self.assertTrue(torch.allclose(out.variance, 1 + obs_noise))
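A minimal usage sketch of FixedNoiseGaussianLikelihood outside the test harness; the shapes and noise values here are illustrative assumptions, not taken from the test above:

import torch
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood

# Fixed, known per-observation noise variances supplied up front.
train_noise = 0.01 * torch.ones(10)
likelihood = FixedNoiseGaussianLikelihood(noise=train_noise)

# At prediction time, noise for new points can be passed per call,
# mirroring the `lkhd(mvn, noise=obs_noise)` pattern exercised above.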
def test_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    for dtype in (torch.float, torch.double):
        mean = torch.randn(4, device=device, dtype=dtype)
        var = torch.randn(4, device=device, dtype=dtype).abs_()
        values = torch.randn(4, device=device, dtype=dtype)

        # Compare against torch's dense MultivariateNormal as the reference.
        res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
        actual = TMultivariateNormal(mean, torch.eye(4, device=device, dtype=dtype) * var).log_prob(values)
        self.assertLess((res - actual).div(res).abs().item(), 1e-2)

        # Batched case: broadcast the diagonal into a batch of dense matrices.
        mean = torch.randn(3, 4, device=device, dtype=dtype)
        var = torch.randn(3, 4, device=device, dtype=dtype).abs_()
        values = torch.randn(3, 4, device=device, dtype=dtype)

        res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
        actual = TMultivariateNormal(
            mean, var.unsqueeze(-1) * torch.eye(4, device=device, dtype=dtype).repeat(3, 1, 1)
        ).log_prob(values)
        self.assertLess((res - actual).div(res).abs().norm(), 1e-2)
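`TMultivariateNormal` is presumably `torch.distributions.MultivariateNormal` imported under an alias to serve as the dense reference implementation. A sketch of the import block these test fragments appear to assume:

import math

import torch
from torch.distributions import MultivariateNormal as TMultivariateNormal

from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from gpytorch.lazy import DiagLazyTensor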
def create_lazy_tensor(self):
    diag = torch.tensor([1.0, 2.0, 4.0, 2.0, 3.0], requires_grad=True)
    return DiagLazyTensor(diag)

def sample_inducing_values(self):
    """
    Sample values from the inducing point distribution `p(u)` or `q(u)`.
    This should only be re-defined to note any conditional independencies in
    the `inducing_values_dist` distribution. (By default, no batch dimensions
    are marked as conditionally independent.)
    """
    # Use a tiny positive value in place of beta == 0 to keep the scale positive.
    beta = self.beta if self.beta > 0.0 else 1.0e-20
    prior_dist = MultivariateNormal(self.prior_mean, DiagLazyTensor(self.prior_var))
    with pyro.poutine.scale(scale=beta / self.num_data):
        return pyro.sample(self.name_prefix + ".inducing_values", prior_dist)
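The `pyro.poutine.scale` context multiplies the log-probability contribution of the sample sites inside it, which is how the `beta / self.num_data` weighting enters the objective. A standalone sketch of the effect:

import pyro
import pyro.distributions as dist
import torch

def model():
    # The log-prob contribution of this site is scaled by 0.5 whenever
    # a trace-based estimator (e.g. an ELBO) is computed over the model.
    with pyro.poutine.scale(scale=0.5):
        pyro.sample("u", dist.Normal(torch.zeros(3), torch.ones(3)).to_event(1))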
if noise is not None:
    return DiagLazyTensor(noise)
training = self.noise_model.training  # keep track of mode
self.noise_model.eval()  # we want the posterior prediction of the noise model
with settings.detach_test_caches(False), settings.debug(False):
    if len(params) == 1 and not torch.is_tensor(params[0]):
        output = self.noise_model(*params[0])
    else:
        output = self.noise_model(*params)
self.noise_model.train(training)  # restore the original train/eval mode
if not isinstance(output, MultivariateNormal):
    raise NotImplementedError("Currently only noise models that return a MultivariateNormal are supported")
# note: this also works with MultitaskMultivariateNormal, where this
# will return a batched DiagLazyTensor of size n x num_tasks x num_tasks
noise_diag = output.mean if self._noise_indices is None else output.mean[..., self._noise_indices]
return DiagLazyTensor(self._noise_constraint.transform(noise_diag))
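The last line maps the noise model's predictive mean through a positivity constraint to obtain valid variances. A short sketch of that transform in isolation; `GreaterThan(1e-4)` is an assumed stand-in for whatever `self._noise_constraint` actually is:

import torch
from gpytorch.constraints import GreaterThan

# Maps unconstrained raw values onto variances strictly above 1e-4,
# playing the same role as `self._noise_constraint.transform` above.
noise_constraint = GreaterThan(1e-4)
noise_diag = noise_constraint.transform(torch.randn(5))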
def _covar_diag(self, inputs):
    if inputs.ndimension() == 1:
        inputs = inputs.unsqueeze(1)
    # Get diagonal of covar
    covar_diag = delazify(self.base_kernel(inputs, diag=True))
    return DiagLazyTensor(covar_diag)
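A usage sketch of the same diagonal-evaluation pattern with a concrete kernel; `RBFKernel` is an illustrative choice, not mandated by the snippet:

import torch
from gpytorch.kernels import RBFKernel
from gpytorch.lazy import delazify

kernel = RBFKernel()
x = torch.randn(10, 3)

# diag=True evaluates only k(x_i, x_i), giving a length-10 tensor
# instead of the full 10 x 10 covariance matrix.
covar_diag = delazify(kernel(x, diag=True))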