How to use the celerite.terms.RealTerm class in celerite

To help you get started, we’ve selected a few celerite examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github dfm / celerite / examples / benchmark / run.py View on Github external
# Simulate a "dataset"
# Fixed seed so every benchmark run times the exact same data.
np.random.seed(42)
# N is defined earlier in this script; generate max(N) sorted points in [0, 1).
t = np.sort(np.random.rand(np.max(N)))
yerr = np.random.uniform(0.1, 0.2, len(t))  # heteroscedastic per-point error bars
y = np.sin(t)  # noise-free signal is fine for timing purposes

def numpy_compute(kernel, x, yerr):
    """Dense reference computation for the benchmark.

    Builds the full covariance matrix of the kernel evaluated at all
    pairwise lags of ``x``, adds the per-point noise variance ``yerr**2``
    on the diagonal, and returns the result of ``scipy.linalg.cho_factor``.
    """
    tau = x[:, None] - x[None, :]  # matrix of pairwise lags t_i - t_j
    cov = kernel.get_value(tau)
    cov[np.diag_indices_from(cov)] += yerr ** 2
    return cho_factor(cov)

def numpy_log_like(factor, y):
    """Dense reference for the data-dependent part of the GP log-likelihood.

    Returns the chi-square term ``y^T K^{-1} y`` plus ``sum(log(diag(L)))``,
    where ``factor`` is the ``cho_factor`` output from ``numpy_compute``
    (``factor[0]`` holds the Cholesky factor, so the diagonal sum equals
    half the log-determinant of K).

    Bug fix: the original evaluated this expression but never returned it,
    so the function always returned ``None``.
    """
    return np.dot(y, cho_solve(factor, y)) + np.sum(np.log(np.diag(factor[0])))

# Build a kernel with exactly 2*j celerite coefficients for each term count j.
for xi, j in enumerate(J):
    kernel = terms.RealTerm(1.0, 0.1)
    # (2*j - 1) % 2 == 1 for any integer j, so this loop always runs exactly
    # once, giving two real terms in total.
    for k in range((2*j - 1) % 2):
        kernel += terms.RealTerm(1.0, 0.1)
    # (2*j - 1) // 2 == j - 1 complex terms, each contributing two
    # coefficients: 2 + 2*(j - 1) == 2*j total, checked by the assert below.
    for k in range((2*j - 1) // 2):
        kernel += terms.ComplexTerm(0.1, 2.0, 1.6)
    coeffs = kernel.coefficients
    assert 2*j == len(coeffs[0]) + 2*len(coeffs[2]), "Wrong number of terms"

    if args.george:
        # Mirror the celerite kernel term-by-term as george CeleriteKernels.
        george_kernel = None
        for a, c in zip(*(coeffs[:2])):
            k = CeleriteKernel(a=a, b=0.0, c=c, d=0.0)
            george_kernel = k if george_kernel is None else george_kernel + k
        for a, b, c, d in zip(*(coeffs[2:])):
            # NOTE(review): b and d are unpacked above but not passed through
            # here (b=0.0, d=0.0 hard-coded), which drops the oscillatory part
            # of every complex term -- this looks like a copy-paste of the real
            # term loop; confirm against the upstream benchmark script.
            k = CeleriteKernel(a=a, b=0.0, c=c, d=0.0)
            george_kernel = k if george_kernel is None else george_kernel + k
        solver = george.GP(george_kernel, solver=george.HODLRSolver)
github dfm / celerite / tests / test_celerite.py View on Github external
solver2.dot_solve(y))

    # Round-trip the solver through pickle (protocol -1 == highest available)
    # and check the copy against the original.
    s = pickle.dumps(solver, -1)
    solver2 = pickle.loads(s)
    compare(solver, solver2)

    # Re-run compute() first so that the factorized internal state also
    # survives a pickle round trip (names here come from the enclosing test).
    solver.compute(
        0.0, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
        beta_complex_real, beta_complex_imag,
        A, U, V, t, diag
    )
    solver2 = pickle.loads(pickle.dumps(solver, -1))
    compare(solver, solver2)

    # Test that models can be pickled too.
    kernel = terms.RealTerm(0.5, 0.1)
    kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
    gp1 = GP(kernel)
    gp1.compute(t, diag)
    s = pickle.dumps(gp1, -1)
    gp2 = pickle.loads(s)
    # Equal log-likelihoods imply the unpickled GP reproduces the original.
    assert np.allclose(gp1.log_likelihood(y), gp2.log_likelihood(y))
github dfm / celerite / tests / test_celerite.py View on Github external
def test_predict(seed=42):
    """Check GP.predict against a brute-force dense linear-algebra solution."""
    np.random.seed(seed)
    xs = np.linspace(1, 59, 300)
    t = np.sort(np.random.uniform(10, 50, 100))
    yerr = np.random.uniform(0.1, 0.5, len(t))
    y = np.sin(t)

    # One real term plus two complex terms.
    kernel = terms.RealTerm(0.1, 0.5)
    kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
    kernel += terms.ComplexTerm(0.1, 0.05, 0.5, -0.1)
    gp = GP(kernel)

    gp.compute(t, yerr)

    # Dense reference: mu = K_* K^{-1} y, cov = K_** - K_* K^{-1} K_*^T.
    K = gp.get_matrix(include_diagonal=True)
    Ks = gp.get_matrix(xs, t)
    expected_mu = Ks.dot(np.linalg.solve(K, y))
    expected_cov = gp.get_matrix(xs, xs) - Ks.dot(np.linalg.solve(K, Ks.T))

    mu, cov = gp.predict(y, xs)
    _, var = gp.predict(y, xs, return_var=True)

    assert np.allclose(mu, expected_mu)
    assert np.allclose(cov, expected_cov)
    # return_var must match the diagonal of the full predictive covariance.
    assert np.allclose(var, np.diag(expected_cov))
github dfm / celerite / tests / test_terms.py View on Github external
        terms.RealTerm(log_a=0.1, log_c=0.5) +
        terms.RealTerm(log_a=-0.1, log_c=0.7),
        terms.ComplexTerm(log_a=0.1, log_c=0.5, log_d=0.1),
        terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
        terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
        terms.RealTerm(log_a=0.1, log_c=0.4),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
        terms.RealTerm(log_a=0.1, log_c=0.4),
    ]
)
def test_jacobian(k, eps=1.34e-7):
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_coeffs_jacobian()
        return
github dfm / celerite / tests / test_terms.py View on Github external
        terms.RealTerm(log_a=0.5, log_c=0.1),
        terms.RealTerm(log_a=0.5, log_c=0.1) + terms.JitterTerm(log_sigma=0.3),
        terms.JitterTerm(log_sigma=0.5) + terms.JitterTerm(log_sigma=0.1),
    ]
)
def test_jitter_jacobian(k, eps=1.34e-7):
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_jitter_jacobian()
        return

    v = k.get_parameter_vector()
    jac = k.get_jitter_jacobian()
    assert len(jac) == len(v)
    jac0 = np.empty_like(jac)
    for i, pval in enumerate(v):
        v[i] = pval + eps
github dfm / celerite / tests / test_terms.py View on Github external
def test_bounds(seed=42):
    """Parameter bounds must be honored whether given as a list or a dict."""
    expected = [(-1.0, 0.3), (-2.0, 5.0)]

    # Bounds supplied as a positional list.
    kernel = terms.RealTerm(log_a=0.1, log_c=0.5, bounds=expected)
    b0 = kernel.get_parameter_bounds()
    assert all(np.allclose(got, want) for got, want in zip(b0, expected))

    # Same bounds supplied as a name -> (lo, hi) mapping.
    named = dict(zip(["log_a", "log_c"], expected))
    kernel = terms.RealTerm(log_a=0.1, log_c=0.5, bounds=named)
    assert all(np.allclose(got, want)
               for got, want in zip(b0, kernel.get_parameter_bounds()))
github California-Planet-Search / radvel / radvel / likelihood.py View on Github external
np.array: numpy array of predictive standard deviations
        """

        self.update_kernel_params()

        # Quasi-periodic kernel hyperparameters: amplitude B, balance C,
        # decay length L, rotation period Prot -- presumably the celerite
        # rotation-kernel parameterization; verify against the radvel docs.
        B = self.kernel.hparams['gp_B'].value
        C = self.kernel.hparams['gp_C'].value
        L = self.kernel.hparams['gp_L'].value
        Prot = self.kernel.hparams['gp_Prot'].value

        # build celerite kernel with current values of hparams
        # White-noise component from the fitted jitter parameter.
        kernel = celerite.terms.JitterTerm(
                log_sigma = np.log(self.params[self.jit_param].value)
                )

        # Purely decaying (non-periodic) part of the quasi-periodic kernel.
        kernel += celerite.terms.RealTerm(
            log_a=np.log(B*(1+C)/(2+C)),
            log_c=np.log(1/L)
        )

        # Periodic part: log_b = -inf zeroes the sine coefficient so only the
        # cosine term at angular frequency 2*pi/Prot contributes.
        kernel += celerite.terms.ComplexTerm(
            log_a=np.log(B/(2+C)),
            log_b=-np.inf,
            log_c=np.log(1/L),
            log_d=np.log(2*np.pi/Prot)
        )

        gp = celerite.GP(kernel)
        gp.compute(self.x, self.yerr)
        # Predictive mean and variance at xpred, conditioned on the residuals.
        mu, var = gp.predict(self._resids(), xpred, return_var=True)

        stdev = np.sqrt(var)
github MNGuenther / allesfitter / allesfitter / computer.py View on Github external
def baseline_get_gp(params, inst, key):
    """Build the celerite kernel for the baseline model of one
    instrument/key pair, choosing the term type from the global settings.

    NOTE(review): this view is truncated -- additional branches and the GP
    construction/return appear to follow below this excerpt.
    """
    #::: kernel
    # Single real (exponential-decay) term.
    if config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_real':
        kernel = terms.RealTerm(log_a=params['baseline_gp_real_lna_'+key+'_'+inst], 
                                log_c=params['baseline_gp_real_lnc_'+key+'_'+inst])
        
    # Single complex (damped-oscillator) term.
    elif config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_complex':
        kernel = terms.ComplexTerm(log_a=params['baseline_gp_complex_lna_'+key+'_'+inst], 
                                log_b=params['baseline_gp_complex_lnb_'+key+'_'+inst], 
                                log_c=params['baseline_gp_complex_lnc_'+key+'_'+inst], 
                                log_d=params['baseline_gp_complex_lnd_'+key+'_'+inst])
                  
    # Matern-3/2 approximation term.
    elif config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_Matern32':
        kernel = terms.Matern32Term(log_sigma=params['baseline_gp_matern32_lnsigma_'+key+'_'+inst], 
                                    log_rho=params['baseline_gp_matern32_lnrho_'+key+'_'+inst])
        
    # Stochastically-driven simple harmonic oscillator term.
    elif config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_SHO':
        kernel = terms.SHOTerm(log_S0=params['baseline_gp_sho_lnS0_'+key+'_'+inst], 
                               log_Q=params['baseline_gp_sho_lnQ_'+key+'_'+inst],
                               log_omega0=params['baseline_gp_sho_lnomega0_'+key+'_'+inst])
github dfm / celerite / paper / figures / error / error.py View on Github external
plot_setup.setup(auto=True)

# K random repetitions over a grid of term counts (J) and data sizes (N).
K = 10
J = np.arange(2, 64, 8)
N = 2**np.arange(6, 16)

alpha_error = np.empty((K, len(J), len(N)))
logdet_error = np.empty((K, len(J), len(N)))
# NaN-fill so entries never written are distinguishable in the plots.
logdet_error[:, :, :] = np.nan

for k in range(K):
    # Fresh random time grid and error bars for each repetition.
    t = np.sort(np.random.uniform(0, N.max() * 0.8, N.max()))
    yerr = np.random.uniform(1.0, 1.5, len(t))

    for ix, j in enumerate(J):
        # Start from two random real terms, then append SHO terms (two
        # coefficients each) until the kernel has 2*j coefficients in total.
        kernel = terms.RealTerm(np.random.uniform(-1, 1),
                                np.random.uniform(-5, -1))
        kernel += terms.RealTerm(np.random.uniform(-1, 1),
                                 np.random.uniform(-5, -1))
        while (len(kernel.coefficients[0]) + 2*len(kernel.coefficients[2]) <
               2*j):
            kernel += terms.SHOTerm(
                log_S0=np.random.uniform(-1, 1),
                log_omega0=np.random.uniform(-5, 0),
                log_Q=np.random.uniform(0, 1),
            )
        # Jitter adds white noise but no coefficients, so the count still holds.
        kernel += terms.JitterTerm(np.random.uniform(-1, 1))
        assert (
            len(kernel.coefficients[0]) + 2*len(kernel.coefficients[2]) == 2*j
        )

        gp = celerite.GP(kernel)