if par not in thawed:
    raise ConfidenceErr('thawed', par.fullname, fit.model.name)

# If the "fast" option is enabled, set the fitting method to
# lmdif (LevMar) if the statistic is chi-squared,
# else set it to neldermead.

# If the current method is not LM or NM, warn that it is not a good
# method for estimating parameter limits.
if type(fit.method) not in (NelderMead, LevMar):
    warning(fit.method.name + " is inappropriate for confidence " +
            "limit estimation")

oldfitmethod = fit.method
if bool_cast(self.fast) is True and methoddict is not None:
    if isinstance(fit.stat, Likelihood):
        if type(fit.method) is not NelderMead:
            fit.method = methoddict['neldermead']
            warning("Setting optimization to " + fit.method.name +
                    " for interval projection plot")
    else:
        if type(fit.method) is not LevMar:
            fit.method = methoddict['levmar']
            warning("Setting optimization to " + fit.method.name +
                    " for interval projection plot")

xvals = self._interval_init(fit, par)
oldpars = fit.model.thawedpars
par.freeze()

try:
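# Every snippet on this page funnels user-supplied flags through bool_cast.
# Below is a minimal sketch of that kind of flag coercion; it is an
# illustration only, not the real Sherpa bool_cast, and the accepted string
# spellings are assumptions.
def bool_cast_sketch(val):
    """Coerce common string spellings of a flag (and plain values) to bool."""
    if isinstance(val, str):
        lowered = val.strip().lower()
        if lowered in ('true', 'on', 'yes', '1'):
            return True
        if lowered in ('false', 'off', 'no', '0'):
            return False
        raise ValueError("unknown boolean value: '%s'" % val)
    # Anything else falls back to Python's normal truthiness rules.
    return bool(val)

# e.g. bool_cast_sketch('off') -> False, bool_cast_sketch(1) -> True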
def get_indep(self, filter=False, model=None):
    filter = bool_cast(filter)
    if filter:
        return (self._x0lo, self._x1lo, self._x0hi, self._x1hi)
    return (self.x0lo, self.x1lo, self.x0hi, self.x1hi)
Examples
--------

>>> dy = dset.get_staterror()

Ensure that there is no pre-defined statistical-error column
and then use the Chi2DataVar statistic to calculate the errors:

>>> stat = sherpa.stats.Chi2DataVar()
>>> dset.set_staterror(None)
>>> dy = dset.get_staterror(staterrfunc=stat.calc_staterror)

"""
staterr = self.staterror
filter = bool_cast(filter)
if filter:
    staterr = self.apply_filter(staterr, self._sum_sq)
else:
    staterr = self.apply_grouping(staterr, self._sum_sq)

# The source AREASCAL is not applied here, but the
# background term is.
#
if (staterr is None) and (staterrfunc is not None):
    cnts = self.counts
    if filter:
        cnts = self.apply_filter(cnts)
    else:
        cnts = self.apply_grouping(cnts)
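# The docstring example above passes stat.calc_staterror as staterrfunc;
# when no statistical-error column is stored, the grouped (or filtered)
# counts are handed to that callback. A hedged sketch of a Chi2DataVar-style
# estimate (variance taken from the counts themselves), not Sherpa's actual
# implementation:
import numpy as np

def calc_staterror_sketch(counts):
    """Approximate per-bin errors as sqrt(counts)."""
    return np.sqrt(np.asarray(counts, dtype=float))

grouped_counts = np.array([4.0, 9.0, 16.0])
print(calc_staterror_sketch(grouped_counts))  # [2. 3. 4.]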
def _set_rate(self, val):
    self._rate = bool_cast(val)
    for id in self.background_ids:
        # TODO: shouldn't this store bool_cast(val) instead?
        self.get_background(id).rate = val
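# The TODO above asks whether the already-cast value should be what gets
# pushed to the backgrounds. A hedged sketch of one way to wire such a
# property on a hypothetical class (not Sherpa's DataPHA), propagating the
# cast value so the source and its backgrounds always agree:
class _RateHolder:
    def __init__(self, backgrounds=()):
        self._rate = True
        self.backgrounds = list(backgrounds)

    @property
    def rate(self):
        return self._rate

    @rate.setter
    def rate(self, val):
        cast = bool(val)  # stand-in for bool_cast-style coercion
        self._rate = cast
        for bkg in self.backgrounds:
            bkg.rate = cast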
self.method = methoddict['levmar']
warning("Setting optimization to " + self.method.name +
        " for confidence limit search")

# Now, set up before we call the confidence limit function.
# Keep track of the starting values; we will need to set the parameters
# back to their starting values when we are done.
startpars = self.model.thawedpars
startsoftmins = self.model.thawedparmins
startsoftmaxs = self.model.thawedparmaxes
starthardmins = self.model.thawedparhardmins
starthardmaxs = self.model.thawedparhardmaxes

# If restricted to soft_limits, only send the soft limits to
# the method, and do not reset the model limits.
if bool_cast(self.estmethod.soft_limits) is True:
    starthardmins = self.model.thawedparmins
    starthardmaxs = self.model.thawedparmaxes
else:
    self.model.thawedparmins = starthardmins
    self.model.thawedparmaxes = starthardmaxs

self.current_frozen = -1

# parnums is the list of indices of the thawed parameters
# we want to visit. For example, if there are three thawed
# parameters, and we want to derive limits for only the first
# and third, then parnums = [0, 2]. We construct the list by
# comparing each parameter in parlist to the thawed model
# parameters. (In the default case, when parlist is None,
# that means get limits for all thawed parameters, so parnums
# is [0, ..., numpars - 1], where numpars is the number of
# thawed parameters.)
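# The comment above describes how parnums is built: record the index of each
# requested parameter within the thawed model parameters, or take every index
# when parlist is None. A hedged sketch of that matching logic (hypothetical
# variable names, not the exact Sherpa code):
thawedpars = [p for p in self.model.pars if not p.frozen]

if parlist is None:
    parnums = list(range(len(thawedpars)))
else:
    parnums = []
    for par in parlist:
        if par not in thawedpars:
            raise ConfidenceErr('thawed', par.fullname, self.model.name)
        parnums.append(thawedpars.index(par))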
Returns
-------
axis : array
    The dependent axis values for the data set. This gives
    the value of each point in the data set.

See Also
--------
get_indep : Return the independent axis of a data set.
get_error : Return the errors on the dependent axis of a data set.
get_staterror : Return the statistical errors on the dependent axis of a data set.
get_syserror : Return the systematic errors on the dependent axis of a data set.

"""
dep = self.dep
filter = bool_cast(filter)
if filter:
    dep = self.apply_filter(dep)
return dep
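# A hedged usage sketch for get_dep above, assuming the standard
# sherpa.data.Data1D class and its notice() method behave as documented:
# with filter=True only the noticed points are returned.
import numpy as np
from sherpa.data import Data1D

d = Data1D('example', np.array([1.0, 2.0, 3.0, 4.0]),
           np.array([10.0, 20.0, 30.0, 40.0]))
d.notice(2.0, 3.0)             # keep only the points with x in [2, 3]
print(d.get_dep())             # all four y values
print(d.get_dep(filter=True))  # just the y values for the noticed points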
def get_y(self, filter=False, yfunc=None, response_id=None,
          use_evaluation_space=False):
    vallist = Data.get_y(self, yfunc=yfunc)
    filter = bool_cast(filter)

    if not isinstance(vallist, tuple):
        vallist = (vallist,)

    newvallist = []
    for val in vallist:
        if filter:
            val = self.apply_filter(val)
        else:
            val = self.apply_grouping(val)
        val = self._fix_y_units(val, filter, response_id)
        newvallist.append(val)

    if len(vallist) == 1:
        vallist = newvallist[0]
    else:
        vallist = tuple(newvallist)

    return vallist
Returns
-------
axis : array or `None`
    The systematic error for each data point. A value of
    `None` is returned if the data set has no systematic
    errors.

See Also
--------
get_error : Return the errors on the dependent axis of a data set.
get_indep : Return the independent axis of a data set.
get_staterror : Return the statistical errors on the dependent axis of a data set.

"""
syserr = getattr(self, 'syserror', None)
filter = bool_cast(filter)
if filter:
    syserr = self.apply_filter(syserr)
return syserr
def image(self, array, shape=None, newframe=False, tile=False):
    newframe = bool_cast(newframe)
    tile = bool_cast(tile)
    if shape is None:
        backend.image(array, newframe, tile)
    else:
        backend.image(array.reshape(shape), newframe, tile)
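# The shape branch above reshapes a flat array before handing it to the
# plotting backend. A hedged illustration of just that reshape step with
# hypothetical data (no backend call involved):
import numpy as np

flat = np.arange(12.0)      # pixel values stored as a 1-D block
img = flat.reshape((3, 4))  # what backend.image() would receive for shape=(3, 4)
print(img.shape)            # (3, 4)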