import numpy as np
from numpy import (sqrt, ndarray, asmatrix, array, prod,
                   asarray, atleast_1d, iterable, linspace, diff,
                   around, log10, zeros, arange, digitize, apply_along_axis,
                   concatenate, bincount, sort, hsplit, argsort, inf, shape,
                   ndim, swapaxes, ravel, diag, cov, transpose as tr)

# NOTE: the names below come from elsewhere in PyMC; the exact import paths
# are an assumption about the surrounding package layout.
import pymc
from pymc import flib
from pymc.Node import Variable
from pymc.six import print_
from pymc.distributions import randomwrap
__all__ = ['append', 'check_list', 'autocorr', 'calc_min_interval',
'check_type', 'ar1',
'ar1_gen', 'draw_random', 'histogram', 'hpd', 'invcdf',
'make_indices', 'normcdf', 'quantiles', 'rec_getattr',
'rec_setattr', 'round_array', 'trace_generator','msqrt','safe_len',
'log_difference', 'find_generations','crawl_dataless', 'logit',
'invlogit','stukel_logit','stukel_invlogit','symmetrize','value']
symmetrize = flib.symmetrize
def value(a):
    """
    Returns a.value if a is a Variable, or just a otherwise.
    """
    if isinstance(a, Variable):
        return a.value
    else:
        return a
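# Usage sketch: plain values pass through unchanged, while PyMC Variables are
# unwrapped to their current value.
#
# >>> value(3.0)
# 3.0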
# =====================================================================
# = Please don't use numpy.vectorize with these! It will leak memory. =
# =====================================================================
def logit(theta):
    return flib.logit(ravel(theta)).reshape(shape(theta))
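# Usage sketch: logit maps probabilities to log-odds elementwise, preserving
# the input's shape (values below computed by hand, not run against flib).
#
# >>> logit(np.array([0.25, 0.5, 0.75]))   # log(p / (1 - p))
# array([-1.09861229,  0.        ,  1.09861229])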
def bernoulli_like(x, p):
    R"""
    Bernoulli log-likelihood.

    :Parameters:
      - `x` : Series of successes (1) and failures (0). :math:`x=0,1`.
      - `p` : Probability of success. :math:`0 < p < 1`.

    :Example:
    >>> from pymc import bernoulli_like
    >>> bernoulli_like([0,1,0,1], .4)
    -2.854232711280291

    .. note::
      - :math:`E(x)= p`
      - :math:`Var(x)= p(1-p)`
    """
    return flib.bernoulli(x, p)
bernoulli_grad_like = {'p' : flib.bern_grad_p}
# Beta----------------------------------------------
@randomwrap
def rbeta(alpha, beta, size=None):
    """
    Random beta variates.
    """
    return np.random.beta(alpha, beta, size)
def beta_expval(alpha, beta):
    """
    Expected value of beta distribution.
    """
    return 1.0 * alpha / (alpha + beta)
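# Consistency sketch (not run here): the sample mean of rbeta draws should
# approach the analytic expectation alpha / (alpha + beta).
#
# >>> np.random.seed(0)
# >>> draws = rbeta(2., 5., size=100000)
# >>> round(beta_expval(2., 5.), 4)        # 2 / 7
# 0.2857
# >>> abs(draws.mean() - 2. / 7.) < 0.01
# True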
def raftery_lewis(x, q, r, s=.95, epsilon=.001, verbose=1):
    """
    Return the number of iterations, thinning and burn-in needed to estimate
    the quantile q of x to within accuracy r with probability s.

    :Example:
    >>> raftery_lewis(x, q=.025, r=.005)

    :Reference:
    Raftery, A.E. and Lewis, S.M. (1995). The number of iterations,
    convergence diagnostics and generic Metropolis algorithms. In
    Practical Markov Chain Monte Carlo (W.R. Gilks, D.J. Spiegelhalter
    and S. Richardson, eds.). London, U.K.: Chapman and Hall.

    See the fortran source file `gibbsit.f` for more details and references.
    """
    if np.ndim(x) > 1:
        return [raftery_lewis(y, q, r, s, epsilon, verbose) for y in np.transpose(x)]

    output = nmin, kthin, nburn, nprec, kmind = pymc.flib.gibbmain(x, q, r, s, epsilon)

    if verbose:
        print_("\n========================")
        print_("Raftery-Lewis Diagnostic")
        print_("========================")
        print_()
        print_("%s iterations required (assuming independence) to achieve %s accuracy with %i percent probability." % (nmin, r, 100 * s))
        print_()
        print_("Thinning factor of %i required to produce a first-order Markov chain." % kthin)
        print_()
        print_("%i iterations to be discarded at the beginning of the simulation (burn-in)." % nburn)
        print_()
        print_("%s subsequent iterations required." % nprec)
        print_()
        print_("Thinning factor of %i required to produce an independence chain." % kmind)

    return output
def binomial_like(x, n, p):
    R"""
    Binomial log-likelihood. The discrete probability distribution of the
    number of successes in a sequence of n independent yes/no experiments,
    each of which yields success with probability p.

    :Parameters:
      - `x` : [int] Number of successes, > 0.
      - `n` : [int] Number of Bernoulli trials, > x.
      - `p` : Probability of success in each trial, :math:`p \in [0,1]`.

    .. note::
      - :math:`E(X)=np`
      - :math:`Var(X)=np(1-p)`
    """
    return flib.binomial(x, n, p)
binomial_grad_like = {'p' : flib.binomial_gp}
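# Worked instance of the binomial formula above (computed by hand, not run):
# binomial_like(3, 10, .5) = log(C(10,3) * .5**10) = log(120/1024) ~ -2.144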
# Beta-binomial----------------------------------------------
@randomwrap
def rbetabin(alpha, beta, n, size=None):
    """
    Random beta-binomial variates.
    """
    phi = np.random.beta(alpha, beta, size)
    return np.random.binomial(n, phi)
def betabin_expval(alpha, beta, n):
    """
    Expected value of beta-binomial distribution.
    """
    return n * alpha / (alpha + beta)
# Inverse gamma----------------------------------------------
def inverse_gamma_like(x, alpha, beta):
    R"""
    Inverse gamma log-likelihood, the reciprocal of the gamma distribution.

    :Parameters:
      - `x` : x > 0
      - `alpha` : Shape parameter (alpha > 0).
      - `beta` : Scale parameter (beta > 0).

    .. note::
      :math:`E(X)=\frac{\beta}{\alpha-1}` for :math:`\alpha > 1`
      :math:`Var(X)=\frac{\beta^2}{(\alpha-1)^2(\alpha-2)}` for :math:`\alpha > 2`
    """
    return flib.igamma(x, alpha, beta)
inverse_gamma_grad_like = {'value' : flib.igamma_grad_x,
                           'alpha' : flib.igamma_grad_alpha,
                           'beta' : flib.igamma_grad_beta}
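# Worked instance of the moments above (by hand): for alpha=3, beta=2,
# E(X) = 2 / (3 - 1) = 1.0 and Var(X) = 2**2 / ((3 - 1)**2 * (3 - 2)) = 1.0.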
# Inverse Wishart---------------------------------------------------
# def rinverse_wishart(n, C):
# """
# Return an inverse Wishart random matrix.
# :Parameters:
# - `n` : [int] Degrees of freedom (n > 0).
# - `C` : Symmetric and positive definite scale matrix
# """
# wi = rwishart(n, np.asmatrix(C).I).I
# flib.symmetrize(wi)
# return wi
# def inverse_wishart_expval(n, C):
# Lognormal----------------------------------------------
def lognormal_like(x, mu, tau):
    R"""
    Log-normal log-likelihood. Distribution of any random variable whose
    logarithm is normally distributed.

    :Parameters:
      - `x` : x > 0
      - `mu` : Location parameter.
      - `tau` : Scale parameter (tau > 0).

    .. note::
      :math:`E(X)=e^{\mu+\frac{1}{2\tau}}`
      :math:`Var(X)=(e^{1/\tau}-1)e^{2\mu+\frac{1}{\tau}}`
    """
    return flib.lognormal(x, mu, tau)
lognormal_grad_like = {'value' : flib.lognormal_gradx,
                       'mu' : flib.lognormal_gradmu,
                       'tau' : flib.lognormal_gradtau}
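# Worked instance of E(X) above (by hand): for mu=0, tau=1,
# E(X) = exp(0 + 1/2) ~ 1.6487.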
# Multinomial----------------------------------------------
#@randomwrap
def rmultinomial(n, p, size=None):
    """
    Random multinomial variates.
    """
    # Leaving size=None as the default means the return value is a 1d array
    # if not specified -- nicer.

    # Single value for p:
    if len(np.shape(p)) == 1:
        return np.random.multinomial(n, p, size)

    # Multiple values for p: loop over the rows of p, pairing each with the
    # matching n (an assumed reconstruction; this branch assumes size is None).
    if np.isscalar(n):
        n = n * np.ones(np.shape(p)[0], dtype=int)
    out = np.empty(np.shape(p))
    for i in range(np.shape(p)[0]):
        out[i, :] = np.random.multinomial(n[i], p[i, :], size)
    return out
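# Usage sketch: a single draw returns one count per category, and the counts
# always sum to n.
#
# >>> draw = rmultinomial(10, [0.2, 0.3, 0.5])
# >>> draw.sum() == 10
# True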
def beta_like(x, alpha, beta):
    R"""
    Beta log-likelihood. The conjugate prior for the parameter
    :math:`p` of the binomial distribution.

    :Example:
    >>> from pymc import beta_like
    >>> beta_like(.4, 1, 2)
    0.182321556793954

    .. note::
      - :math:`E(X)=\frac{\alpha}{\alpha+\beta}`
      - :math:`Var(X)=\frac{\alpha \beta}{(\alpha+\beta)^2(\alpha+\beta+1)}`
    """
    # try:
    #     constrain(alpha, lower=0, allow_equal=True)
    #     constrain(beta, lower=0, allow_equal=True)
    #     constrain(x, 0, 1, allow_equal=True)
    # except ZeroProbability:
    #     return -np.Inf
    return flib.beta_like(x, alpha, beta)
beta_grad_like = {'value' : flib.beta_grad_x,
                  'alpha' : flib.beta_grad_a,
                  'beta' : flib.beta_grad_b}
# Binomial----------------------------------------------
@randomwrap
def rbinomial(n, p, size=None):
    """
    Random binomial variates.
    """
    # return np.random.binomial(n, p, size)
    return np.random.binomial(np.ravel(n), np.ravel(p), size)
def binomial_expval(n, p):
    """
    Expected value of binomial distribution.
    """
    return p * n

# Weibull----------------------------------------------
def weibull_like(x, alpha, beta):
    R"""
    Weibull log-likelihood.

    :Parameters:
      - `x` : :math:`x \ge 0`
      - `alpha` : alpha > 0
      - `beta` : beta > 0

    .. note::
      - :math:`E(x)=\beta \Gamma(1+\frac{1}{\alpha})`
      - :math:`Var(x)=\beta^2 \Gamma(1+\frac{2}{\alpha}) - \mu^2`
    """
    return flib.weibull(x, alpha, beta)
weibull_grad_like = {'value' : flib.weibull_gx,
                     'alpha' : flib.weibull_ga,
                     'beta' : flib.weibull_gb}
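# Worked instance of the moments above (by hand): for alpha=2, beta=1,
# E(x) = Gamma(1 + 1/2) = sqrt(pi)/2 ~ 0.8862.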
# Wishart---------------------------------------------------
def rwishart(n, Tau):
    """
    Return a Wishart random matrix. Tau is the inverse of the
    'covariance' matrix :math:`C`.
    """
    p = np.shape(Tau)[0]
    sig = np.linalg.cholesky(Tau)
    if n < p:
        raise ValueError('Wishart parameter n must be greater than size of matrix.')
    # Assumed reconstruction via the Bartlett decomposition: A A^T ~ W(n, I),
    # and M M^T = inv(Tau) = C, so (M A)(M A)^T ~ W(n, C).
    A = np.diag(np.sqrt(np.random.chisquare(df=np.arange(n, n - p, -1))))
    A[np.tril_indices(p, -1)] = np.random.normal(size=p * (p - 1) // 2)
    MA = np.dot(np.linalg.inv(sig).T, A)
    w = np.asmatrix(np.dot(MA, MA.T))
    flib.symmetrize(w)
    return w
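# Sanity sketch (not run here): E[rwishart(n, Tau)] = n * inv(Tau), so with
# Tau = I the average of many draws should approach n * I.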
R"""
Half-normal log-likelihood, a normal distribution with mean 0 limited
to the domain :math:`x \in [0, \infty)`.
.. math::
f(x \mid \tau) = \sqrt{\frac{2\tau}{\pi}}\exp\left\{ {\frac{-x^2 \tau}{2}}\right\}
:Parameters:
- `x` : :math:`x \ge 0`
- `tau` : tau > 0
"""
return flib.hnormal(x, tau)
half_normal_grad_like = {'value' : flib.hnormal_gradx,
                         'tau' : flib.hnormal_gradtau}
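# Equivalent sampling sketch: a half-normal variate is the absolute value of a
# zero-mean normal with precision tau (standard deviation 1/sqrt(tau)).
#
# >>> tau = 4.0
# >>> draw = abs(np.random.normal(0, 1 / np.sqrt(tau)))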
# Hypergeometric----------------------------------------------
def rhypergeometric(n, m, N, size=None):
    """
    Returns hypergeometric random variates.
    """
    if n == 0:
        return np.zeros(size, dtype=int)
    elif n == N:
        out = np.empty(size, dtype=int)
        out.fill(m)
        return out
    return np.random.hypergeometric(n, N - n, m, size)
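# Note on the call above: numpy's hypergeometric takes (ngood, nbad, nsample).
# The hypergeometric distribution is symmetric in the number of good items and
# the sample size, so drawing with (n, N - n, m) matches (m, N - m, n).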
def hypergeometric_expval(n, m, N):
    """
    Expected value of hypergeometric distribution.
    """
    return 1. * n * m / N