How to use the celerite.GP class in celerite

To help you get started, we’ve selected a few celerite.GP examples based on popular ways it is used in public projects.

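Before working through the project snippets, here is a minimal, self-contained sketch of the typical celerite.GP workflow: build a kernel, wrap it in a GP, factorize with compute, then evaluate the log-likelihood and predict. The data below are synthetic and purely illustrative.

import numpy as np
import celerite
from celerite import terms

# Synthetic, irregularly sampled data (illustrative only)
np.random.seed(42)
t = np.sort(np.random.uniform(0, 10, 100))
yerr = 0.1 * np.ones_like(t)
y = np.sin(t) + yerr * np.random.randn(len(t))

# Build a covariance kernel and wrap it in a GP
kernel = terms.Matern32Term(log_sigma=0.0, log_rho=0.0)
gp = celerite.GP(kernel, mean=0.0)

# Factorize the covariance matrix for the observed times and uncertainties
gp.compute(t, yerr)
print("Log-likelihood:", gp.log_likelihood(y))

# Predictive mean and variance on a fine grid
t_pred = np.linspace(0, 10, 500)
mu, var = gp.predict(y, t_pred, return_var=True)
std = np.sqrt(var)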

From dfm/celerite: tests/test_celerite.py (view on GitHub)
def test_build_gp(seed=42):
    kernel = terms.RealTerm(0.5, 0.1)
    kernel += terms.ComplexTerm(0.6, 0.7, 1.0)
    gp = GP(kernel)

    assert gp.vector_size == 5
    p = gp.get_parameter_vector()
    assert np.allclose(p, [0.5, 0.1, 0.6, 0.7, 1.0])

    gp.set_parameter_vector([0.5, 0.8, 0.6, 0.7, 2.0])
    p = gp.get_parameter_vector()
    assert np.allclose(p, [0.5, 0.8, 0.6, 0.7, 2.0])

    with pytest.raises(ValueError):
        gp.set_parameter_vector([0.5, 0.8, -0.6])

    with pytest.raises(ValueError):
        gp.set_parameter_vector("face1")
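The test above exercises the parameter-vector interface that most downstream code relies on. A short sketch of the same pattern outside the test suite (the kernel values are arbitrary):

import numpy as np
import celerite
from celerite import terms

kernel = terms.RealTerm(log_a=0.5, log_c=0.1) + terms.ComplexTerm(0.6, 0.7, 1.0)
gp = celerite.GP(kernel)

print(gp.get_parameter_names())    # names, in the same order as the vector
p = gp.get_parameter_vector()      # current (unfrozen) parameter values
gp.set_parameter_vector(p + 0.1)   # update all unfrozen parameters at once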

From dfm/celerite: tests/test_celerite.py (view on GitHub)
def get_gp(solver, kernel):
    if type(solver) == SparseSolver:
        return GP(kernel, sparse=True)
    return GP(kernel, use_lapack=solver.use_lapack)

From nespinoza/juliet: juliet.py (view on GitHub)
            for pnames in priors.keys():
                vec = pnames.split('_')
                if (vec[0] == 'GP') and (GPvariable in vec[1]) and ('rv' in vec[-1].lower()):
                    rv_dictionary['GP_'+GPvariable] = '_'.join(vec[2:])

        #for instrument in inames_rv:
        rot_kernel = terms.TermSum(RotationTerm(log_amp=np.log(10.),\
                                                log_timescale=np.log(10.0),\
                                                log_period=np.log(3.0),\
                                                log_factor=np.log(1.0)))
        # Jitter term; don't add it here, jitters will be added directly to the log-likelihood (see Espinoza+2018).
        #kernel_jitter = terms.JitterTerm(np.log(100*1e-6))

        # Wrap GP object to compute likelihood:
        kernel = rot_kernel #+ kernel_jitter
        rv_dictionary['GPObject'] = celerite.GP(kernel, mean=0.0)
        # Note order of GP Vector: logB, logL, logProt, logC, logJitter
        rv_dictionary['GPVector'] = np.zeros(5)
        rv_dictionary['GPObject'].compute(rv_dictionary['X'],yerr=rverr_rv)

    if rv_dictionary['GPType'] == 'ExpSineSquaredSEKernel':
        for GPvariable in ['sigma','alpha','Gamma','Prot']:
            for pnames in priors.keys():
                vec = pnames.split('_')
                if (vec[0] == 'GP') and (GPvariable in vec[1]) and ('rv' in vec[-1].lower()):
                    rv_dictionary['GP_'+GPvariable] = '_'.join(vec[2:])

        #for instrument in inames_rv:
        # Generate GP Base Kernel (Constant * ExpSquared * ExpSine2):
        K1 = 1.*george.kernels.ExpSquaredKernel(metric = 1.0)
        K2 = george.kernels.ExpSine2Kernel(gamma=1.0,log_period=1.0)
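In juliet-style wrappers like the one above, the GP object and its parameter vector are stored once and then updated at every likelihood evaluation. A minimal sketch of that update pattern, with a standard SHOTerm standing in for juliet's custom RotationTerm and synthetic radial-velocity residuals:

import numpy as np
import celerite
from celerite import terms

t_rv = np.linspace(0.0, 30.0, 200)            # observation times (illustrative)
rv_err = 0.05 * np.ones_like(t_rv)
rv_resid = 0.05 * np.random.randn(len(t_rv))  # residuals to model with the GP

kernel = terms.SHOTerm(log_S0=0.0, log_Q=1.0, log_omega0=np.log(2*np.pi/3.0))
gp = celerite.GP(kernel, mean=0.0)
gp.compute(t_rv, yerr=rv_err)

def gp_loglike(theta):
    # theta follows the order returned by gp.get_parameter_names()
    gp.set_parameter_vector(theta)
    return gp.log_likelihood(rv_resid)

print(gp_loglike(gp.get_parameter_vector()))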

From MNGuenther/allesfitter: allesfitter/exoworlds_rdx/lightcurves/gp_decor.py (view on GitHub)
def call_gp(params):
    log_sigma, log_rho, log_error_scale = params
    if GP_CODE=='celerite':
        kernel = terms.Matern32Term(log_sigma=log_sigma, log_rho=log_rho)
        gp = celerite.GP(kernel, mean=MEAN, fit_mean=False) #log_white_noise=np.log(yerr), 
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    elif GP_CODE=='george':
        kernel = np.exp(log_sigma) * kernels.Matern32Kernel(log_rho)
        gp = george.GP(kernel, mean=MEAN, fit_mean=False) #log_white_noise=np.log(yerr), 
        gp.compute(xx, yerr=yyerr/err_norm*np.exp(log_error_scale))
        return gp
    else:
        raise ValueError('A bad thing happened.')
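A factory like call_gp is typically paired with a negative log-likelihood that rebuilds the GP for every trial parameter set. A minimal sketch of that pairing, assuming yy is the module-level data array that accompanies xx and yyerr in gp_decor:

import numpy as np
from scipy.optimize import minimize

def neg_log_like(params):
    gp = call_gp(params)           # rebuilds and computes the GP for this trial
    ll = gp.log_likelihood(yy)     # yy: assumed module-level data array
    return -ll if np.isfinite(ll) else 1e25

# initial guess for (log_sigma, log_rho, log_error_scale)
p0 = [np.log(np.std(yy)), 0.0, 0.0]
soln = minimize(neg_log_like, p0, method="Nelder-Mead")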

From California-Planet-Search/radvel: radvel/likelihood.py (view on GitHub)
        kernel = celerite.terms.JitterTerm(
            log_sigma=np.log(self.params[self.jit_param].value)
        )

        kernel += celerite.terms.RealTerm(
            log_a=np.log(B*(1+C)/(2+C)),
            log_c=np.log(1/L)
        )

        kernel += celerite.terms.ComplexTerm(
            log_a=np.log(B/(2+C)),
            log_b=-np.inf,
            log_c=np.log(1/L),
            log_d=np.log(2*np.pi/Prot)
        )

        gp = celerite.GP(kernel)
        gp.compute(self.x, self.yerr)
        mu, var = gp.predict(self._resids(), xpred, return_var=True)

        stdev = np.sqrt(var)

        return mu, stdev
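The radvel method above conditions the GP on the orbit-fit residuals and returns a predictive mean and standard deviation. A standalone sketch of the same predict call, with placeholder data in place of radvel's residuals:

import numpy as np
import celerite
from celerite import terms

t = np.sort(np.random.uniform(0, 100, 60))   # observation epochs (illustrative)
resid = 3.0 * np.random.randn(len(t))        # residuals after the Keplerian fit
errs = 2.0 * np.ones_like(t)                 # per-point uncertainties

kernel = terms.RealTerm(log_a=np.log(9.0), log_c=np.log(1.0/20.0))
gp = celerite.GP(kernel)
gp.compute(t, errs)

t_pred = np.linspace(0, 100, 500)
mu, var = gp.predict(resid, t_pred, return_var=True)
stdev = np.sqrt(var)                         # 1-sigma predictive uncertainty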

From dfm/celerite: paper/figures/astero_sample.py (view on GitHub)
    # Note: the opening line of this kernel constructor call is truncated in this excerpt.
    np.log(np.var(y)),
    2.0,
    np.log(nu_max*uHz_conv),        # log(nu_max)
    np.log(delta_nu*uHz_conv),      # log(delta_nu)
    0.0,                            # offset between nu_max and central mode
    np.log(np.var(y)),              # log(amp_max)
    5.0,                            # log(q_factor)
    np.log(delta_nu*uHz_conv),      # width of envelope
    bounds=bounds,
    nterms=2,
)
log_white_noise = modeling.ConstantModel(
    2.0*np.log(np.median(np.abs(np.diff(fit_y)))),
    bounds=[(-15, 15)]
)
gp = celerite.GP(kernel, log_white_noise=log_white_noise)
gp.compute(fit_x, fit_yerr)
print("Initial log-likelihood: {0}".format(gp.log_likelihood(fit_y)))
print(gp.get_parameter_dict(include_frozen=True))

# The objective function for optimization
def nll(params):
    gp.set_parameter_vector(params)
    ll = gp.log_likelihood(fit_y)
    if not np.isfinite(ll):
        return 1e10
    return -ll+0.5*gp.kernel.epsilon**2

# Grid initialize
print("Running a grid of optimizations...")
gp.log_white_noise.thaw_all_parameters()
gp.kernel.thaw_all_parameters()
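The grid of optimizations referenced above is usually driven by scipy.optimize.minimize acting on the GP's parameter vector; a minimal sketch of one such optimization step, assuming the gp, nll and fit_y defined in the snippet:

from scipy.optimize import minimize

p0 = gp.get_parameter_vector()
bounds = gp.get_parameter_bounds()
soln = minimize(nll, p0, method="L-BFGS-B", bounds=bounds)
gp.set_parameter_vector(soln.x)
print("Final log-likelihood: {0}".format(gp.log_likelihood(fit_y)))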

From dfm/celerite: papers/paper1/figures/simulated/wrong-qpo.py (view on GitHub)
N = 100
t = np.sort(np.random.uniform(0, 20, N))
yerr = 0.5
K = true_model.get_K(t)
K[np.diag_indices_from(K)] += yerr**2
y = np.random.multivariate_normal(np.zeros(N), K)

# Set up the celerite model that we will use to fit - product of two SHOs
log_Q = 1.0
kernel = terms.SHOTerm(log_S0=np.log(np.var(y))-2*log_Q, log_Q=log_Q,
                       log_omega0=np.log(2*np.pi))
kernel *= terms.SHOTerm(log_S0=0.0, log_omega0=0.0, log_Q=-0.5*np.log(2))
kernel.freeze_parameter("k2:log_S0")
kernel.freeze_parameter("k2:log_Q")

gp = celerite.GP(kernel)
gp.compute(t, yerr)

# Fit for the maximum likelihood
def nll(params, gp, y):
    gp.set_parameter_vector(params)
    if not np.isfinite(gp.log_prior()):
        return 1e10
    ll = gp.log_likelihood(y)
    return -ll if np.isfinite(ll) else 1e10

p0 = gp.get_parameter_vector()
soln = minimize(nll, p0, method="L-BFGS-B", args=(gp, y))
gp.set_parameter_vector(soln.x)

kernel.freeze_parameter("k1:log_S0")
p0 = gp.get_parameter_vector()

From MNGuenther/allesfitter: allesfitter/computer.py (view on GitHub)
        kernel = terms.Matern32Term(log_sigma=params['stellar_var_gp_matern32_lnsigma_'+key],
                                    log_rho=params['stellar_var_gp_matern32_lnrho_'+key])

    elif config.BASEMENT.settings['stellar_var_'+key] == 'sample_GP_SHO':
        kernel = terms.SHOTerm(log_S0=params['stellar_var_gp_sho_lnS0_'+key],
                               log_Q=params['stellar_var_gp_sho_lnQ_'+key],
                               log_omega0=params['stellar_var_gp_sho_lnomega0_'+key])

    else:
        raise KeyError('GP settings and params do not match.')
    
    #::: GP and mean (simple offset)  
    if 'stellar_var_gp_offset_'+key in params:
        gp = celerite.GP(kernel, mean=params['stellar_var_gp_offset_'+key], fit_mean=True)
    else:
        gp = celerite.GP(kernel, mean=0.)
        
        
    return gp
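The branch above decides whether the GP's constant mean (the baseline offset) is a free parameter. A small self-contained sketch showing how fit_mean=True exposes that offset in the parameter vector, with an arbitrary Matern-3/2 kernel standing in for the one built above:

import celerite
from celerite import terms

kernel = terms.Matern32Term(log_sigma=0.0, log_rho=0.0)

gp_fixed = celerite.GP(kernel, mean=0.0)                    # mean held fixed at zero
gp_fitted = celerite.GP(kernel, mean=0.001, fit_mean=True)  # mean offset is free

print(gp_fixed.get_parameter_names())   # kernel parameters only
print(gp_fitted.get_parameter_names())  # also lists the mean offset (e.g. 'mean:value')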

From iancze/PSOAP: psoap/sample.py (view on GitHub)
N = data.N

# Initialize the orbit
orb = orbit.models[model](**pars, obs_dates=date1D)


# term = terms.SHOTerm(log_S0=-7.0, log_omega0=10, log_Q=2.)
term1 = terms.Matern32Term(log_sigma=-4.0, log_rho=-22)
term2 = terms.Matern32Term(log_sigma=-6.0, log_rho=-22)
# term += terms.JitterTerm(log_sigma=np.log(np.median(sigma)))

# term = terms.Matern32Term(log_sigma=-1.53, log_rho=-10.7)
# term += terms.JitterTerm(log_sigma=np.log(np.median(sigma)))


gp1 = celerite.GP(term1)
gp2 = celerite.GP(term2)

# def lnprob(p):
#     '''
#     Unified lnprob interface.
#
#     Args:
#         p (np.float): vector containing the model parameters
#
#     Returns:
#         float : the lnlikelihood of the model parameters.
#     '''
#
#     # separate the parameters into orbital and GP based upon the model type
#     # also backfill any parameters that we have fixed for this analysis
#     p_orb, p_GP = convert_vector_p(p)

From MNGuenther/allesfitter: allesfitter/computer.py (view on GitHub)
    elif config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_Matern32':
        kernel = terms.Matern32Term(log_sigma=params['baseline_gp_matern32_lnsigma_'+key+'_'+inst],
                                    log_rho=params['baseline_gp_matern32_lnrho_'+key+'_'+inst])

    elif config.BASEMENT.settings['baseline_'+key+'_'+inst] == 'sample_GP_SHO':
        kernel = terms.SHOTerm(log_S0=params['baseline_gp_sho_lnS0_'+key+'_'+inst],
                               log_Q=params['baseline_gp_sho_lnQ_'+key+'_'+inst],
                               log_omega0=params['baseline_gp_sho_lnomega0_'+key+'_'+inst])

    else:
        raise KeyError('GP settings and params do not match.')
    
    #::: GP and mean (simple offset)  
    if 'baseline_gp_offset_'+key+'_'+inst in params:
        gp = celerite.GP(kernel, mean=params['baseline_gp_offset_'+key+'_'+inst], fit_mean=True)
    else:
        gp = celerite.GP(kernel, mean=0.)
        
        
    return gp