# NOTE: scraper boilerplate, not part of the test suite:
# "Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately."
l3 = m2.compute_log_likelihood()
l4 = m3.compute_log_likelihood()
self.assertTrue(l1 == l2 == l3 == l4)
# make sure predictions still match (this tests AutoFlow)
pX = np.linspace(-3, 3, 10)[:, None]
p1, _ = self.m.predict_y(pX)
p2, _ = m1.predict_y(pX)
p3, _ = m2.predict_y(pX)
p4, _ = m3.predict_y(pX)
self.assertTrue(np.all(p1 == p2))
self.assertTrue(np.all(p1 == p3))
self.assertTrue(np.all(p1 == p4))
class TestPickleFix(GPflowTestCase):
    """
    Verify that a kernel whose parameter has been fixed can still be
    evaluated after a pickle round-trip.
    """
    def test(self):
        with self.test_context():
            kernel = gpflow.kernels.PeriodicKernel(1)
            kernel.period.fixed = True
            # Round-trip through pickle; the fixed flag must survive it.
            restored = pickle.loads(pickle.dumps(kernel))
            grid = np.linspace(0, 1, 100).reshape([-1, 1])
            # Computing the kernel matrix would raise if unpickling broke the kernel.
            restored.compute_K(grid, grid)
class TestPickleSVGP(GPflowTestCase):
    """
    Like the TestPickleGPR test, but with svgp (since it has extra tf variables
    for minibatching)
    """
    # NOTE(review): this class defines no test method in the visible chunk, and
    # the commented-out code below exercises VGP data resizing rather than SVGP
    # pickling -- it looks pasted here from another test; confirm intent before
    # assuming this class provides any coverage.
    # X = np.random.randn(N, DX)
    # Y = np.random.randn(N, DY)
    # kern = gpflow.kernels.Matern52(DX)
    # likelihood = gpflow.likelihoods.StudentT()
    # m_vgp = gpflow.models.VGP(X, Y, kern, likelihood)
    # m_vgp_oa = gpflow.models.VGP_opper_archambeau(X, Y, kern, likelihood)
    # for m in [m_vgp, m_vgp_oa]:
    # m.compile()
    # opt = gpflow.train.ScipyOptimizer()
    # opt.minimize(m, maxiter=1)
    # m.X = X[:-1, :]
    # m.Y = Y[:-1, :]
    # opt.minimize(m, maxiter=1)
class TestUpperBound(GPflowTestCase):
    """
    Test for upper bound for regression marginal likelihood
    """
    def setUp(self):
        # Noisy sine data: 100 one-dimensional inputs drawn from [0, 1).
        self.X = np.random.rand(100, 1)
        self.Y = np.sin(1.5 * 2 * np.pi * self.X) + np.random.randn(*self.X.shape) * 0.1

    def test_few_inducing_points(self):
        with self.test_context() as session:
            # Sparse (VFE) model using only the first 10 inputs as inducing points.
            vfe = gpflow.models.SGPR(self.X, self.Y, gpflow.kernels.RBF(1), self.X[:10, :].copy())
            opt = gpflow.train.ScipyOptimizer()
            opt.minimize(vfe)
            # Full GPR with the lengthscale copied from the optimised sparse model.
            full = gpflow.models.GPR(self.X, self.Y, gpflow.kernels.RBF(1))
            full.kern.lengthscales = vfe.kern.lengthscales.read_value()
            # NOTE(review): this method appears truncated in this chunk.
variances.append(m.kernel.rbf.variance.read_value())
lengthscale.append(m.kernel.rbf.lengthscale.read_value())
else:
variances.append(m.kernel.variance.read_value())
lengthscale.append(m.kernel.lengthscale.read_value())
variances, lengthscale = np.array(variances), np.array(lengthscale)
assert_allclose(variances, variances[0], 1e-5)
assert_allclose(lengthscale, lengthscale.mean(), 1e-4)
mu0, var0 = models[0].predict_y(self.Xtest)
for i, m in enumerate(models[1:]):
mu, var = m.predict_y(self.Xtest)
assert_allclose(mu, mu0, 1e-3)
assert_allclose(var, var0, 1e-4)
class VGPTest(GPflowTestCase):
    def test_vgp_vs_svgp(self):
        """Compare VGP against an SVGP whose inducing points sit at the data."""
        with self.test_context():
            # Problem sizes: N train points, Ns test points, DX/DY input/output dims.
            N, Ns, DX, DY = 100, 10, 2, 2
            np.random.seed(1)  # deterministic data for a reproducible comparison
            X = np.random.randn(N, DX)
            Xs = np.random.randn(Ns, DX)
            Y = np.random.randn(N, DY)
            kernel = gpflow.kernels.Matern52(DX)
            likelihood = gpflow.likelihoods.StudentT()
            # SVGP with Z == X, whitened representation and a full-rank q --
            # this configuration is the non-sparse case comparable to VGP.
            m_svgp = gpflow.models.SVGP(
                X, Y, kernel, likelihood, X.copy(), whiten=True, q_diag=False)
            m_vgp = gpflow.models.VGP(X, Y, kernel, likelihood)
            m_svgp.compile()
            # NOTE(review): the remainder of this test is not visible in this chunk.
p.set_trainable(False)
self.assertFalse(all([c.trainable for c in p.parameters]))
self.assertFalse(p.trainable)
p.set_trainable(True)
self.assertTrue(all([c.trainable for c in p.parameters]))
self.assertTrue(p.trainable)
values = [None, "test", "", 1]
for v in values:
with self.assertRaises(ValueError, msg='Caught exception for "{}"'.format(v)):
p.set_trainable(v)
class TestParameterizedNoParameters(GPflowTestCase):
    """Behaviour of a Parameterized tree that contains no Param leaves."""

    def setUp(self):
        # Build a two-child Parameterized tree without compiling it yet.
        with self.test_context(), gpflow.defer_build():
            self.m = gpflow.params.Parameterized(name='m')
            self.m.p = gpflow.params.Parameterized()
            self.m.b = gpflow.params.Parameterized()

    def test_feeds_empty(self):
        """A freshly created Parameterized exposes no feeds or initializables."""
        with self.test_context():
            empty = gpflow.Parameterized()
            self.assertEqual(empty.initializables, [])
            self.assertEqual(empty.initializable_feeds, {})
            self.assertEqual(empty.feeds, {})

    def test_is_built(self):
        """With no Params anywhere the tree already counts as built."""
        with self.test_context():
            self.assertEqual(self.m.is_built_coherence(), gpflow.Build.YES)
            self.assertEqual(list(self.m.parameters), [])
            self.assertEqual(list(self.m.data_holders), [])
            self.assertEqual(len(list(self.m.params)), 2)

    def test_add_parameter_to_empty_parameterized(self):
        """Adding a Param invalidates the build; recompiling restores it."""
        with self.test_context():
            self.m.compile()
            self.m.a = gpflow.Param(10)
            self.assertEqual(self.m.is_built_coherence(), gpflow.Build.NO)
            self.m.compile()
            self.assertEqual(self.m.is_built_coherence(), gpflow.Build.YES)
            # Once built, replacing a child with a new Param must be rejected.
            with self.assertRaises(GPflowError):
                self.m.b = gpflow.Param(20)
class TestParameterizedCompile(GPflowTestCase):
    def setUp(self):
        # Fresh graph per test; build a small Parameterized tree containing a
        # mix of tensor-backed, scalar, vector and nested Params.
        self.test_graph = tf.Graph()
        with self.test_context() as session:
            self.graph = session.graph
            tensor = tf.get_variable('a', shape=())
            self.m = gpflow.params.Parameterized(name='m')
            self.m.p = gpflow.params.Parameterized()
            self.m.a = gpflow.Param(tensor)  # Param wrapping a pre-existing tf variable
            self.m.b = gpflow.Param(1.0, trainable=False)
            self.m.c = gpflow.Param(np.array([1.0, 2.0]))
            self.m.p.d = gpflow.Param(1.0)  # nested one level down

    def test_compile(self):
        with self.test_context():
            # Compiling must succeed even when one Param wraps an existing tensor.
            tensor = self.m.a.parameter_tensor
            self.m.compile()
def testTwo(self):
    """Compare models built from differing index lists, unequal batch sizes."""
    with self.test_context():
        first = [self.indexA, self.indexB]
        second = [self.indexA, self.indexA]
        self.compare_models(first, second, batchOne=1, batchTwo=2, maxiter=1)
def testThree(self):
    """Compare models built from differing index lists, equal batch sizes."""
    with self.test_context():
        first = [self.indexA, self.indexA]
        second = [self.indexA, self.indexB]
        self.compare_models(first, second, batchOne=1, batchTwo=1, maxiter=2)
class TestSparseMCMC(GPflowTestCase):
    """
    This test makes sure that when the inducing points are the same as the data
    points, the sparse mcmc is the same as full mcmc
    """
    def test_likelihoods_and_gradients(self):
        with self.test_context() as session:
            rng = np.random.RandomState(0)  # fixed seed for reproducibility
            X = rng.randn(10, 1)
            Y = rng.randn(10, 1)
            v_vals = rng.randn(10, 1)
            # Note: the likelihood *class* (not an instance) is stored here.
            lik = gpflow.likelihoods.StudentT
            m1 = gpflow.models.GPMC(
                X=X, Y=Y,
                kern=gpflow.kernels.Exponential(1),
                # NOTE(review): this constructor call is truncated in this chunk.
def test_asymm(self):
    """Cross-covariance of the combined kernel matches its per-column parts."""
    for make_a, make_b, make_c in self.kernels():
        with self.test_context(graph=tf.Graph()):
            rng = np.random.RandomState(0)
            X = rng.randn(20, 2)
            Z = rng.randn(10, 2)
            kern_a, kern_b, kern_c = make_a(), make_b(), make_c()
            full_a = kern_a.compute_K(X, Z)
            full_b = kern_b.compute_K(X, Z)
            # Evaluate the third kernel on each input column separately.
            col0 = kern_c.compute_K(X[:, :1], Z[:, :1])
            col1 = kern_c.compute_K(X[:, 1:], Z[:, 1:])
            self.assertTrue(np.allclose(full_a, col0))
            self.assertTrue(np.allclose(full_b, col1))
class TestProd(GPflowTestCase):
    def setUp(self):
        self.test_graph = tf.Graph()
        with self.test_context():
            k1 = gpflow.kernels.Matern32(2)
            k2 = gpflow.kernels.Matern52(2, lengthscales=0.3)
            # Product kernel built with operator overloading on kernels.
            k3 = k1 * k2
            self.kernels = [k1, k2, k3]

    def tearDown(self):
        GPflowTestCase.tearDown(self)
        # Explicitly clear the compiled product kernel between tests.
        self.kernels[2].clear()

    def test_prod(self):
        with self.test_context() as session:
            self.kernels[2].compile()
            # NOTE(review): this test appears truncated in this chunk.
import gpflow
from gpflow.test_util import GPflowTestCase
class Quadratic(gpflow.models.Model):
    """Toy model whose likelihood is minus the squared norm of a 10-vector.

    The likelihood is maximised at x == 0, which optimizer tests drive towards.
    """

    def __init__(self):
        seeded = np.random.RandomState(0)
        gpflow.models.Model.__init__(self)
        self.x = gpflow.Param(seeded.randn(10))

    @gpflow.params_as_tensors
    def _build_likelihood(self):
        # -sum(x_i^2): strictly concave with a unique optimum at the origin.
        return tf.negative(tf.reduce_sum(tf.square(self.x)))
class TestOptimize(GPflowTestCase):
    """Check that standard optimizers drive the Quadratic model to its optimum."""

    def test_adam(self):
        with self.test_context():
            model = Quadratic()
            optimizer = gpflow.train.AdamOptimizer(0.01)
            optimizer.minimize(model, maxiter=5000)
            # First-order method: a looser tolerance is used here.
            self.assertTrue(model.x.read_value().max() < 1e-2)

    def test_lbfgsb(self):
        with self.test_context():
            model = Quadratic()
            optimizer = gpflow.train.ScipyOptimizer()
            optimizer.minimize(model, maxiter=1000)
            # Scipy's optimizer should reach the optimum much more tightly.
            self.assertTrue(model.x.read_value().max() < 1e-6)
class Empty(gpflow.models.Model):
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
from numpy.testing import assert_allclose
from gpflow.config import default_jitter
import gpflow
from gpflow.test_util import GPflowTestCase
class TestEquivalence(GPflowTestCase):
"""
With a Gaussian likelihood, and inducing points (where appropriate)
positioned at the data, many of the gpflow methods are equivalent (perhaps
subject to some optimization).
Here, we make 5 models that should be the same, and make sure some
similarities hold. The models are:
1) GP Regression
2) Variational GP (with the likelihood set to Gaussian)
3) Sparse variational GP (likelihood is Gaussian, inducing points
at the data)
4) Sparse variational GP (as above, but with the whitening rotation
of the inducing variables)
5) Sparse variational GP Regression (as above, but there the inducing
variables are 'collapsed' out, as in Titsias 2009)
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import gpflow
from gpflow.test_util import GPflowTestCase
class TestGaussian(GPflowTestCase):
    def prepare(self):
        # Random regression data: 100 two-dimensional inputs, scalar targets.
        self.rng = np.random.RandomState(0)
        self.X = self.rng.randn(100, 2)
        self.Y = self.rng.randn(100, 1)
        self.kern = gpflow.kernels.Matern32(2) + gpflow.kernels.White(1)
        self.Xtest = self.rng.randn(10, 2)
        self.Ytest = self.rng.randn(10, 1)
        # make a Gaussian model
        return gpflow.models.GPR(self.X, self.Y, kern=self.kern)

    def test_all(self):
        with self.test_context():
            m = self.prepare()
            # Latent-function and observation predictions at the test inputs.
            mu_f, var_f = m.predict_f(self.Xtest)
            mu_y, var_y = m.predict_y(self.Xtest)
            # NOTE(review): the remainder of this test is not visible in this chunk.