PyMC: Markov chain Monte Carlo sampling for Python.
"""
# Package version string.
__version__ = '2.2'

# NumPy is a hard dependency: fail at import time with a pointer to the
# user guide rather than with an obscure downstream error.
try:
    import numpy
except ImportError:
    raise ImportError('NumPy does not seem to be installed. Please see the user guide.')

# Core modules
from .threadpool import *
import os
import pymc

# Warn when the current working directory lies inside the PyMC source tree:
# the in-tree package would shadow an installed copy (compiled extensions
# may be missing there).  The check compares os.getcwd() against the parent
# directory of the imported pymc package.
if os.getcwd().find(os.path.abspath(os.path.split(os.path.split(pymc.__file__)[0])[0]))>-1:
    from .six import print_
    print_('\n\tWarning: You are importing PyMC from inside its source tree.')

# Public API: re-export the modelling, stepping and sampling machinery.
from .Node import *
from .Container import *
from .PyMCObjects import *
from .InstantiationDecorators import *
from .CommonDeterministics import *
from .NumpyDeterministics import *
from .distributions import *
from .Model import *
from .StepMethods import *
from .MCMC import *
from .NormalApproximation import *

# Test entry point: pymc.test() runs the bundled test-suite.
from .tests import test
# Help text for the interactive sampling console; shown when the console
# starts and echoed again after an unrecognized command.  The string content
# is user-facing output and is kept verbatim.
cmds = """
Commands:
i -- index: print current iteration index
p -- pause: interrupt sampling and return to the main console.
Sampling can be resumed later with icontinue().
h -- halt: stop sampling and truncate trace. Sampling cannot be
resumed for this chain.
b -- bg: return to the main console. The sampling will still
run in a background thread. There is a possibility of
malfunction if you interfere with the Sampler's
state or the database during sampling. Use this at your
own risk.
"""
print_("""==============
PyMC console
==============
PyMC is now sampling. Use the following commands to query or pause the sampler.
""", file=out)
print_(cmds, file=out)
prompt = True
try:
while self.status in ['running', 'paused']:
# sys.stdout.write('pymc> ')
if prompt:
out.write('pymc > ')
out.flush()
if self._exc_info is not None:
quantiles[s.__name__].append(open01(q))
# Replace data values
for o in sampler.observed_stochastics:
o.revert()
finally:
# Replace data values
for o in sampler.observed_stochastics:
o.revert()
# Replace backend
sampler._assign_database_backend(original_backend)
if not i % 10 and i and verbose:
print_("\tCompleted validation replicate", i)
# Replace backend
sampler._assign_database_backend(original_backend)
stats = {}
# Calculate chi-square statistics
for param in quantiles:
q = quantiles[param]
# Calculate chi-square statistics
X2 = sum(sp.special.ndtri(q)**2)
# Calculate p-value
p = sp.special.chdtrc(replicates, X2)
stats[param] = (X2, p)
elif acc_rate>0.5:
# increase by ten percent
self.adaptive_scale_factor *= 1.1
else:
tuning = False
# Re-initialize rejection count
self.rejected = 0.
self.accepted = 0.
# More verbose feedback, if requested
if verbose > 0:
if hasattr(self, 'stochastic'):
print_('\t\tvalue:', self.stochastic.value)
print_('\t\tacceptance rate:', acc_rate)
print_('\t\tadaptive scale factor:', self.adaptive_scale_factor)
print_()
return tuning
prompt = True
try:
while self.status in ['running', 'paused']:
# sys.stdout.write('pymc> ')
if prompt:
out.write('pymc > ')
out.flush()
if self._exc_info is not None:
a,b,c = self._exc_info
reraise(a, b, c)
cmd = utils.getInput().strip()
if cmd == 'i':
print_('Current iteration: %i of %i' % (self._current_iter, self._iter), file=out)
prompt = True
elif cmd == 'p':
self.status = 'paused'
break
elif cmd == 'h':
self.status = 'halt'
break
elif cmd == 'b':
return
elif cmd == '\n':
prompt = True
pass
elif cmd == '':
prompt = False
else:
print_('Unknown command: ', cmd, file=out)
# return
if self.verbose > 0:
print_('\tTuning at iteration', self._current_iter)
# Initialize counter for number of tuning stochastics
tuning_count = 0
for step_method in self.step_methods:
verbose = self.verbose
if step_method.verbose > -1:
verbose = step_method.verbose
# Tune step methods
tuning_count += step_method.tune(verbose=self.verbose)
if verbose > 1:
print_('\t\tTuning step method %s, returned %i\n' %(step_method._id, tuning_count))
sys.stdout.flush()
if self._burn_till_tuned:
if not tuning_count:
# If no step methods needed tuning, increment count
self._tuned_count += 1
else:
# Otherwise re-initialize count
self._tuned_count = 0
# n consecutive clean intervals removed tuning
# n is equal to self._stop_tuning_after
if self._tuned_count == self._stop_tuning_after:
if self.verbose > 0: print_('\nFinished tuning')
self._tuning = False
def get_value(self):
    """Return this node's current value.

    Emits a trace message via ``print_`` when the node's verbosity
    level is greater than 1; otherwise the access is silent.
    """
    trace_access = self.verbose > 1
    if trace_access:
        print_('\t' + self.__name__ + ': value accessed.' )
    return self._value
prompt = True
elif cmd == 'p':
self.status = 'paused'
break
elif cmd == 'h':
self.status = 'halt'
break
elif cmd == 'b':
return
elif cmd == '\n':
prompt = True
pass
elif cmd == '':
prompt = False
else:
print_('Unknown command: ', cmd, file=out)
print_(cmds, file=out)
prompt = True
except KeyboardInterrupt:
if not self.status == 'ready':
self.status = 'halt'
if self.status == 'ready':
print_("Sampling terminated successfully.", file=out)
else:
print_('Waiting for current iteration to finish...', file=out)
while self._sampling_thread.isAlive():
sleep(.1)
print_('Exiting interactive prompt...', file=out)
if self.status == 'paused':
R"""
Categorical log-likelihood. The most general discrete distribution.
.. math:: f(x=i \mid p) = p_i
for :math:`i \in 0 \ldots k-1`.
:Parameters:
- `x` : [int] :math:`x \in 0\ldots k-1`
- `p` : [float] :math:`p > 0`, :math:`\sum p = 1`
"""
p = np.atleast_2d(p)
if np.any(abs(np.sum(p, 1)-1)>0.0001):
print_("Probabilities in categorical_like sum to", np.sum(p, 1))
if np.array(x).dtype != int:
#print_("Non-integer values in categorical_like")
return -inf
return flib.categorical(x, p)