def _init_kernel(self, D, lengthscale, variance, period):
    base = gpflow.kernels.Matern32(D, variance=variance, lengthscales=lengthscale)
    return gpflow.kernels.Periodic(base=base, period=period)
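# --- A minimal usage sketch of the periodic Matern kernel built above. It
# assumes a GPflow 2-style API, where Periodic wraps a base kernel and kernels
# no longer take an input_dim argument; adapt to the version pinned here.
import numpy as np
import gpflow

base = gpflow.kernels.Matern32(variance=1.0, lengthscales=0.5)
kernel = gpflow.kernels.Periodic(base, period=2.0)
X = np.linspace(0.0, 4.0, 10).reshape(-1, 1)
K = kernel(X)           # evaluate the kernel matrix at the inputs
print(K.numpy().shape)  # (10, 10)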
def __init__(self, X, Y, ms, a, b, kern, likelihood,
             mean_function=gpflow.mean_functions.Zero()):
    """
    Here we assume the interval is [a, b].
    """
    assert X.shape[1] == 1
    assert isinstance(kern, (gpflow.kernels.Matern12,
                             gpflow.kernels.Matern32,
                             gpflow.kernels.Matern52))
    gpflow.models.GPModel.__init__(self, X, Y, kern,
                                   likelihood, mean_function)
    self.num_data = X.shape[0]
    self.num_latent = Y.shape[1]
    self.a = a
    self.b = b
    self.ms = ms

    # initialize variational parameters
    Ncos = self.ms.size
    Nsin = self.ms.size - 1
    if isinstance(self.kern, gpflow.kernels.Matern12):
        Ncos += 1
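# --- A standalone sketch of the cosine/sine feature counting used above,
# assuming the Variational Fourier Features convention that `ms` holds the
# integer frequencies 0, 1, ..., M-1. The `order` argument is hypothetical
# shorthand for the Matern12/32/52 cases.
import numpy as np

def num_fourier_features(order, ms):
    """Return (Ncos, Nsin) for a Matern kernel of half-integer order 1, 3 or 5."""
    extra_cos, extra_sin = {1: (1, 0), 3: (1, 1), 5: (2, 1)}[order]
    return ms.size + extra_cos, ms.size - 1 + extra_sin

print(num_fourier_features(1, np.arange(10)))  # (11, 9) for Matern12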
def __init__(self, Y, latent_dim, X_mean=None, kernel=None, mean_function=None, **kwargs):
    """
    Initialise GPLVM object. This method only works with a Gaussian likelihood.

    :param Y: data matrix, size N (number of points) x D (dimensions)
    :param latent_dim: number of latent dimensions Q
    :param X_mean: latent positions ([N, Q]), for the initialisation of the latent space
    :param kernel: kernel specification, by default RBF
    :param mean_function: mean function, by default None
    """
    if mean_function is None:
        mean_function = Zero()
    if kernel is None:
        kernel = kernels.RBF(latent_dim, ARD=True)
    if X_mean is None:
        X_mean = PCA_reduce(Y, latent_dim)
    num_latent = X_mean.shape[1]
    if num_latent != latent_dim:
        msg = 'Passed-in number of latent dimensions ({0}) does not match initial X ({1}).'
        raise ValueError(msg.format(latent_dim, num_latent))
    if Y.shape[1] < num_latent:
        raise ValueError('More latent dimensions than observed.')
    GPR.__init__(self, X_mean, Y, kernel, mean_function=mean_function, **kwargs)
    del self.X  # in GPLVM this is a Parameter
    self.X = Parameter(X_mean)
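# --- A hedged usage sketch of the constructor above (GPflow 1.x-style API,
# assuming the enclosing class is the library's GPLVM; the data is synthetic).
import numpy as np
from gpflow.models import GPLVM

Y = np.random.randn(50, 5)     # 50 observations in a 5-dimensional space
m = GPLVM(Y, latent_dim=2)     # latent positions initialised by PCA
print(m.X.read_value().shape)  # (50, 2); a free Parameter, optimised jointly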
def convgp_setup_model(train_data, batch_size,
                       patch_shape, num_inducing_points):
    X, Y = train_data
    H = int(X.shape[1] ** .5)  # inputs are flattened H x H images
    likelihood = get_likelihood()
    num_latent = likelihood.num_classes if hasattr(likelihood, 'num_classes') else 1
    # Initialise the inducing patches from a subset of the training images.
    patches = patch_initializer(X[:400], num_inducing_points, patch_shape)
    kern = gpflow.kernels.WeightedConvolutional(
        gpflow.kernels.SquaredExponential(np.prod(patch_shape)),
        [H, H], patch_size=patch_shape)
    feat = gpflow.features.InducingPatch(patches)
    kern.basekern.variance = 25.0
    kern.basekern.lengthscales = 1.2
    model = gpflow.models.SVGP(X, Y, kern, likelihood, num_latent=num_latent,
                               feat=feat, minibatch_size=batch_size,
                               name="gp_model")
    # Randomise q_mu to break symmetry between the latent functions.
    model.q_mu = np.random.randn(*(model.q_mu.read_value().shape)).astype(gpflow.settings.float_type)
    return model
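# --- A hedged sketch of calling convgp_setup_model. `get_likelihood` and
# `patch_initializer` are project helpers assumed by the function, so only the
# call shape is shown; the MNIST-like sizes here are illustrative.
import numpy as np

X = np.random.rand(1000, 28 * 28)             # 1000 flattened 28x28 images
Y = np.random.randint(0, 10, size=(1000, 1))  # integer class labels
model = convgp_setup_model((X, Y), batch_size=128,
                           patch_shape=(5, 5), num_inducing_points=384)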
import numpy as np
import gpflow
import tensorflow as tf

Xtrain = np.loadtxt('notebooks/data/banana_X_train', delimiter=',')
Ytrain = np.loadtxt('notebooks/data/banana_Y_train', delimiter=',').reshape(-1, 1)
idx = np.random.choice(range(Xtrain.shape[0]), size=3, replace=False)
feature = Xtrain[idx, ...]
# 1. `input_dim` is not required anymore.
kernel = gpflow.kernels.RBF()
# 2. The assigned value (10.0) lives in the constrained space.
kernel.lengthscale.assign(10.0)
gpflow.set_trainable(kernel.variance, False)
likelihood = gpflow.likelihoods.Bernoulli()
# 3. Constrained vs unconstrained values.
print(f"Unconstrained parameter value of `kernel.lengthscale` = {kernel.lengthscale.unconstrained_variable.numpy()}")
print(f"Constrained parameter value of `kernel.lengthscale` = {kernel.lengthscale.numpy()}")
# 4. X's and Y's are no longer part of the model.
m = gpflow.models.SVGP(kernel=kernel, feature=feature, likelihood=likelihood)
X, Y = tf.convert_to_tensor(Xtrain), tf.convert_to_tensor(Ytrain)

def loss_cb():
    return m.neg_log_marginal_likelihood(X, Y)
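# --- A hedged sketch of driving `loss_cb` with a plain TensorFlow 2 training
# loop; the optimiser choice and step count are assumptions, not part of the
# source snippet.
opt = tf.optimizers.Adam(learning_rate=0.01)
for _ in range(100):
    opt.minimize(loss_cb, var_list=m.trainable_variables)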
for kern in kerns:
    assert isinstance(kern, (gpflow.kernels.Matern12,
                             gpflow.kernels.Matern32,
                             gpflow.kernels.Matern52))
mf = gpflow.mean_functions.Zero()
gpflow.model.GPModel.__init__(self, X, Y, kern=None,
                              likelihood=likelihood, mean_function=mf)
self.num_data = X.shape[0]
self.num_latent = 1  # multiple columns not supported in this version
self.a = a
self.b = b
self.ms = ms

# initialize variational parameters
self.Ms = []
for kern in kerns:
    Ncos_d = self.ms.size
    Nsin_d = self.ms.size - 1
    if isinstance(kern, gpflow.kernels.Matern12):
        Ncos_d += 1
    elif isinstance(kern, gpflow.kernels.Matern32):
        Ncos_d += 1
        Nsin_d += 1
    elif isinstance(kern, gpflow.kernels.Matern52):
        Ncos_d += 2
        Nsin_d += 1
    else:
        raise NotImplementedError
    self.Ms.append(Ncos_d + Nsin_d)

self.kerns = gpflow.param.ParamList(kerns)
self.V = gpflow.param.Param(np.zeros((np.prod(self.Ms), 1)))
self.V.prior = gpflow.priors.Gaussian(0., 1.)
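# --- A hypothetical walk-through of the bookkeeping above: with 8 frequencies
# in `ms` and one kernel per input dimension (Matern12, Matern32, Matern52),
# the per-dimension feature counts and the Kronecker-product size of V are:
import numpy as np

extra = {'Matern12': (1, 0), 'Matern32': (1, 1), 'Matern52': (2, 1)}
Ms = [(8 + ec) + (7 + es) for ec, es in extra.values()]  # base Ncos=8, Nsin=7
print(Ms, np.prod(Ms))  # [16, 17, 18] 4896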
@dispatch.expectation.register((Gaussian, MarkovGaussian), mfn.Identity, NoneType, kernels.Linear, InducingPoints)
def _E(p, mean, _, kernel, inducing_variable, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
        - K_{.,.} :: Linear kernel

    or the equivalent for MarkovGaussian

    :return: NxDxM
    """
    return tf.linalg.adjoint(expectation(p, (kernel, inducing_variable), mean))
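# --- A hedged usage sketch of the dispatcher above (GPflow 2 API; the shapes
# are small and arbitrary). `expectation` resolves to the registered handler
# from the argument types, here <x_n K_{x_n, Z}> for a Linear kernel under a
# Gaussian density with an Identity mean function.
import numpy as np
import gpflow
from gpflow.expectations import expectation
from gpflow.probability_distributions import Gaussian

N, D, M = 6, 2, 4
p = Gaussian(np.random.randn(N, D),
             np.tile(np.eye(D)[None], (N, 1, 1)))  # N x D x D covariances
kernel = gpflow.kernels.Linear()
Z = gpflow.inducing_variables.InducingPoints(np.random.randn(M, D))
exKxz = expectation(p, gpflow.mean_functions.Identity(), (kernel, Z))
print(exKxz.shape)  # (6, 2, 4), i.e. N x D x M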