def get_model(num_data):
    # Z (the inducing points) is assumed to be defined in the enclosing scope.
    return gpflow.models.SVGP(kernel=gpflow.kernels.SquaredExponential(),
                              num_data=num_data,
                              likelihood=gpflow.likelihoods.Gaussian(),
                              inducing_variable=Z)
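A minimal usage sketch, assuming Z (the inducing points) is defined where get_model can see it; the data and names below are illustrative, not from the source:

import numpy as np
import gpflow

Z = np.linspace(0, 1, 5).reshape(-1, 1)  # assumed inducing points
X = np.random.rand(100, 1)
Y = np.sin(10 * X) + 0.1 * np.random.randn(100, 1)

model = get_model(num_data=len(X))
# For SVGP the training objective is the ELBO, evaluated on (inputs, targets).
objective = model.elbo((X, Y))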
def getLikelihoodSetups(includeMultiClass=True, addNonStandardLinks=False):
    test_setups = []
    rng = np.random.RandomState(1)
    for likelihoodClass in gpflow.likelihoods.Likelihood.__subclasses__():
        if likelihoodClass == gpflow.likelihoods.MonteCarloLikelihood:
            continue  # abstract base class
        if likelihoodClass == gpflow.likelihoods.Ordinal:
            test_setups.append(
                LikelihoodSetup(likelihoodClass(np.array([-1, 1])),
                                rng.randint(0, 3, (10, 2)), 1e-6))
        elif likelihoodClass == gpflow.likelihoods.SwitchedLikelihood:
            continue  # SwitchedLikelihood is tested separately
        elif likelihoodClass == gpflow.likelihoods.MultiClass:
            if includeMultiClass:
                sample = rng.randn(10, 2)
                # MultiClass needs a looser tolerance because of the clipping it applies.
                tolerance = 1e-3
                test_setups.append(
                    LikelihoodSetup(likelihoodClass(2),
                                    np.argmax(sample, 1).reshape(-1, 1), tolerance))
        else:
            # Most likelihoods follow this standard pattern:
            test_setups.append(
                LikelihoodSetup(likelihoodClass(),
                                rng.rand(10, 2).astype(default_float()), 1e-6))
    if addNonStandardLinks:
        test_setups.append(LikelihoodSetup(gpflow.likelihoods.Poisson(invlink=tf.square),
                                           rng.rand(10, 2).astype(default_float()), 1e-6))
    return test_setups
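LikelihoodSetup is not defined in this snippet; a minimal stand-in consistent with how it is used above (the field names are assumptions, not the library's definition):

from collections import namedtuple

# Hypothetical container: a likelihood, matching test targets, and a tolerance.
LikelihoodSetup = namedtuple("LikelihoodSetup", ["likelihood", "Y", "tolerance"])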
# X, Y, label, and X_augmented are assumed to be built earlier in the test.
Y_augmented = np.hstack([np.concatenate(Y), np.concatenate(label)])

# 1. Two independent VGPs for the two sets of data
k0 = gpflow.kernels.RBF(2)
k0.lengthscales.trainable = False
vgp0 = gpflow.models.VGP(
    X[0], Y[0], kern=k0,
    mean_function=gpflow.mean_functions.Constant(),
    likelihood=gpflow.likelihoods.Gaussian())
k1 = gpflow.kernels.RBF(2)
k1.lengthscales.trainable = False
vgp1 = gpflow.models.VGP(
    X[1], Y[1], kern=k1,
    mean_function=gpflow.mean_functions.Constant(),
    likelihood=gpflow.likelihoods.Gaussian())

# 2. Coregionalized GPR
lik = gpflow.likelihoods.SwitchedLikelihood(
    [gpflow.likelihoods.Gaussian(), gpflow.likelihoods.Gaussian()])
kc = gpflow.kernels.RBF(2)
kc.trainable = False  # lengthscale and variance are fixed
coreg = gpflow.kernels.Coregion(1, output_dim=2, rank=1, active_dims=[2])
coreg.W.trainable = False
mean_c = gpflow.mean_functions.SwitchedMeanFunction(
    [gpflow.mean_functions.Constant(), gpflow.mean_functions.Constant()])
cvgp = gpflow.models.VGP(
    X_augmented, Y_augmented,
    kern=kc * coreg,
    mean_function=mean_c,
    likelihood=lik)
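For context, a sketch of how the augmented arrays are typically assembled for a Coregion kernel with active_dims=[2]; X1, X2, Y1, Y2 are hypothetical per-output datasets, not names from the source:

import numpy as np

X1, X2 = np.random.rand(20, 2), np.random.rand(15, 2)
Y1, Y2 = np.random.rand(20, 1), np.random.rand(15, 1)

# Append an output-index column (0 or 1): the Coregion kernel reads it via
# active_dims=[2], and SwitchedLikelihood reads the last column of Y the same way.
X_augmented = np.vstack([np.hstack([X1, np.zeros((len(X1), 1))]),
                         np.hstack([X2, np.ones((len(X2), 1))])])
Y_augmented = np.vstack([np.hstack([Y1, np.zeros((len(Y1), 1))]),
                         np.hstack([Y2, np.ones((len(Y2), 1))])])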
def test_svgp(self):
    with self.test_context():
        X, Y, kern, rng = self.prepare()
        m = gpflow.models.SVGP(X, Y, kern, likelihood=gpflow.likelihoods.StudentT(), Z=X[::2])
        m.compile()
        # Reassign the data holder, first with the same shape, then with a new one.
        m.X = rng.randn(*X.shape)
        m.X = rng.randn(30, 1)
def setUp(self):
    with self.test_context():
        rng = np.random.RandomState(0)
        X = rng.randn(10, 1)
        Y = rng.randn(10, 1)
        Z = rng.randn(5, 1)
        self.m = gpflow.models.SVGP(
            X, Y, Z=Z,
            likelihood=gpflow.likelihoods.Gaussian(),
            kern=gpflow.kernels.RBF(1))
def get_gpmc_model_params():
    kernel = gpflow.kernels.Matern32()
    likelihood = gpflow.likelihoods.Gaussian()
    # GPflow expects float-valued column arrays for inputs and targets.
    data = [np.arange(5, dtype=float).reshape(-1, 1),
            np.arange(5, dtype=float).reshape(-1, 1)]
    return data, kernel, likelihood
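A sketch of feeding these params into a model, assuming GPflow 2's GPMC signature (the sampling step is omitted):

data, kernel, likelihood = get_gpmc_model_params()
# GPMC models the latent function values directly so they can be sampled with MCMC.
model = gpflow.models.GPMC(tuple(data), kernel, likelihood)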
def likelihood():
    return gpflow.likelihoods.Gaussian(variance=Setup.likelihood_variance)
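Setup is defined elsewhere in the test module; a minimal stand-in (the variance value is an assumption):

class Setup:
    # Hypothetical constant standing in for the module's shared test config.
    likelihood_variance = 0.3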
def run_keras_fit():
    model_gp = SVGP(gpflow.kernels.RBF(), gpflow.likelihoods.Gaussian(),
                    feature=np.linspace(0, 10, 10).reshape(10, 1))
class Metrics(tf.keras.callbacks.Callback):
    def __init__(self, validation_data):
        super().__init__()
        self.validation_data = validation_data

    def on_train_begin(self, logs=None):
        self._data = []

    def on_epoch_end(self, epoch, logs=None):
        X_val, y_val = self.validation_data
        if epoch % 100 == 0:
            # Every 100 epochs, plot the GP's mean prediction against the test data.
            Y_predict_gp = self.model.bayesian_model.predict_y(X_val)[0]
            plt.plot(Xtest, Ytest, 'b.')
            plt.plot(Xtest, Y_predict_gp, 'r.')
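A sketch of wiring the callback into a Keras training loop; keras_model (a tf.keras model that exposes the GP as bayesian_model) and the data arrays are hypothetical:

metrics_cb = Metrics(validation_data=(X_val, y_val))
keras_model.fit(X_train, Y_train, epochs=500, callbacks=[metrics_cb])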
def main():
    f, axarr = plt.subplots(1, 2, figsize=(15, 7.5))
    TRAIN_KERNEL = True
    USE_ADAM = False

    # VGP Model:
    print("Running VGP model.")
    m = gpflow.models.VGP(Xtrain, Ytrain,
                          kern=gpflow.kernels.RBF(2),
                          likelihood=gpflow.likelihoods.Bernoulli())
    if not TRAIN_KERNEL:
        m.kern.lengthscales.set_trainable(False)
        m.kern.variance.set_trainable(False)
    m.compile()
    print("VGP model's initial log likelihood: {}".format(m.compute_log_likelihood()))
    if USE_ADAM:
        gpflow.train.AdamOptimizer().minimize(m, maxiter=500)
    else:
        gpflow.train.ScipyOptimizer(options=dict(maxiter=100)).minimize(m)
    plot(m, axarr[0])
    print("VGP model's final log likelihood: {}".format(m.compute_log_likelihood()))
    print("VGP model's final kernel variance: {}".format(m.kern.variance.read_value()))
    print("VGP model's final kernel lengthscale: {}".format(m.kern.lengthscales.read_value()))
    print("=================================\n\n")

    # EP Binary Classification Model: