def build_point_source_jl(self):

    # Build one JointLikelihood analysis per spectral shape
    data_list = DataList(self._plugin)

    jls = {}

    for key in self._shapes.keys():
        ps = PointSource("test", 0, 0, spectral_shape=self._shapes[key])
        model = Model(ps)
        jls[key] = JointLikelihood(model, data_list)

    return jls
def build_point_source_bayes(self):

    # Same fixture pattern, but building a BayesianAnalysis per spectral shape
    data_list = DataList(self._plugin)

    bayes = {}

    for key in self._shapes.keys():
        ps = PointSource("test", 0, 0, spectral_shape=self._shapes[key])
        model = Model(ps)
        bayes[key] = BayesianAnalysis(model, data_list)

    return bayes
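
# A hedged usage sketch for the first builder: fit every shape in the returned
# dictionary. 'fixture' is a hypothetical instance of the class above;
# JointLikelihood.fit() returns the best-fit parameter table and the
# -log(likelihood) table as two DataFrames.
jls = fixture.build_point_source_jl()

for key, jl in jls.items():
    param_df, like_df = jl.fit(quiet=True)
    print(key, like_df)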
def fit(self, function, minimizer="minuit", verbose=False):
    """
    Fit the data with the provided function (an astromodels function)

    :param function: astromodels function
    :param minimizer: the minimizer to use
    :param verbose: print every step of the fit procedure
    :return: best fit results
    """

    # This is a wrapper to provide an easier way to fit simple data without
    # having to go through the definition of sources
    pts = PointSource("source", 0.0, 0.0, function)

    model = Model(pts)

    self.set_model(model)

    self._joint_like_obj = JointLikelihood(model, DataList(self), verbose=verbose)

    self._joint_like_obj.set_minimizer(minimizer)

    return self._joint_like_obj.fit()
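
# A hedged usage sketch for the wrapper above, assuming it belongs to an
# XYLike-style plugin; the plugin name, data values, and variable names here
# are made up for illustration.
from threeML import XYLike
from astromodels import Powerlaw

xy = XYLike(
    "toy",
    x=[1.0, 2.0, 3.0, 4.0],
    y=[10.0, 5.1, 3.3, 2.4],
    yerr=[0.5, 0.4, 0.3, 0.3],
)

# Fit a power law directly, without building PointSource/Model/DataList by hand
best_fit, like_values = xy.fit(Powerlaw(), minimizer="minuit")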
# Make sure we have a fit
assert (
    self._jl_instance.results is not None
), "You have to perform a fit before using GoodnessOfFit"

if like_data_frame is None:
    like_data_frame = self._jl_instance.results.get_statistic_frame()

# Restore best fit and store the reference value for the likelihood
self._jl_instance.restore_best_fit()

self._reference_like = like_data_frame["-log(likelihood)"]

# Store best model
self._best_fit_model = clone_model(self._jl_instance.likelihood_model)
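
# A hedged sketch of how GoodnessOfFit is typically driven: 'jl' stands in for
# a JointLikelihood that has already been fit, and the iteration count is a
# placeholder. by_mc runs Monte Carlo simulations under the best-fit model.
from threeML import GoodnessOfFit

gof_obj = GoodnessOfFit(jl)
gof, data_frames, like_data_frames = gof_obj.by_mc(n_iterations=100)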
def compute_TS(self, source_name, alt_hyp_mlike_df):
    """
    Computes the Likelihood Ratio Test statistic (TS) for the provided source

    :param source_name: name of the source
    :param alt_hyp_mlike_df: likelihood dataframe (the second output of the .fit() method)
    :return: a DataFrame containing the null-hypothesis and the alternative-hypothesis
             -log(likelihood) values and the TS value for the source, for each loaded dataset
    """
    assert source_name in self._likelihood_model, (
        "Source %s is not in the current model" % source_name
    )

    # Clone the model so the original is left untouched
    model_clone = clone_model(self._likelihood_model)

    # Remove this source from the model (null hypothesis)
    _ = model_clone.remove_source(source_name)

    # Fit the null-hypothesis model
    another_jl = JointLikelihood(model_clone, self._data_list)

    # Use the same minimizer as the parent object
    another_jl.set_minimizer(self.minimizer_in_use)

    # We do not need the covariance matrix, just the likelihood value
    _, null_hyp_mlike_df = another_jl.fit(
        quiet=True, compute_covariance=False, n_samples=1
    )

    # Compute TS for all datasets
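    # The snippet is truncated here. Conceptually, for each dataset
    # TS = 2 * [(-log L)_null - (-log L)_alt]. A hedged reconstruction of that
    # final step follows: the column name comes from the snippets above, but
    # the DataFrame layout is an assumption (and pandas is assumed imported
    # as pd).
    TS = 2 * (
        null_hyp_mlike_df["-log(likelihood)"]
        - alt_hyp_mlike_df["-log(likelihood)"]
    )

    TS_df = pd.DataFrame(
        {
            "Null hyp.": null_hyp_mlike_df["-log(likelihood)"],
            "Alt. hyp.": alt_hyp_mlike_df["-log(likelihood)"],
            "TS": TS,
        }
    )

    return TS_df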
# Collect the names of the free spectral parameters for this source
# (pyLike is the Fermi Science Tools pyLikelihood module)
thisNamesV = pyLike.StringVector()
thisSrc = self.like.logLike.getSource(srcName)
thisSrc.spectrum().getFreeParamNames(thisNamesV)
thisNames = map(lambda x: "%s_%s" % (srcName, x), thisNamesV)
freeParamNames.extend(thisNames)

# Wrap each free pyLikelihood parameter in an astromodels Parameter
nuisanceParameters = collections.OrderedDict()

for name in freeParamNames:
    value = self.getNuisanceParameterValue(name)
    bounds = self.getNuisanceParameterBounds(name)
    delta = self.getNuisanceParameterDelta(name)

    nuisanceParameters["%s_%s" % (self.name, name)] = Parameter(
        "%s_%s" % (self.name, name),
        value,
        min_value=bounds[0],
        max_value=bounds[1],
        delta=delta,
    )

    nuisanceParameters["%s_%s" % (self.name, name)].free = self.innerMinimization
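
# A minimal standalone sketch of the astromodels Parameter construction used
# above; the name, value, bounds, and delta here are made up.
from astromodels import Parameter

norm = Parameter(
    "LAT_iso_Normalization",  # hypothetical "<plugin>_<param>" name
    1.0,
    min_value=0.1,
    max_value=10.0,
    delta=0.05,
)

# Leave it free so the plugin's inner fit can profile it out
norm.free = True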
# Prepare a callback which will set the parameter value in the pyLikelihood object if it gets
# changed
# def this_callback(parameter):
#
# _, src, pname = parameter.name.split("_")
#
# try:
#
# self.like.model[src].funcs['Spectrum'].getParam(pname).setValue(parameter.value)
# Use the internal representation (see the Parameter class)
parameter._set_internal_value(trial_values[i])
# Now profile out nuisance parameters and compute the new value
# for the likelihood
summed_log_likelihood = 0

for dataset in list(self._data_list.values()):
    try:
        this_log_like = dataset.inner_fit()

    except ModelAssertionViolation:
        # This is a zone of the parameter space which is not allowed. Return
        # a big number for the likelihood so that the fit engine will avoid it
        custom_warnings.warn(
            "Fitting engine in forbidden space: %s" % (trial_values,),
            custom_exceptions.ForbiddenRegionOfParameterSpace,
        )

        return minimization.FIT_FAILED

    except:
        # Do not intercept other errors
        raise
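    # (assumed continuation) accumulate each dataset's profiled log-likelihood;
    # the minimizer is then handed the negative of the sum. This is a sketch,
    # not the verbatim remainder of the method.
    summed_log_likelihood += this_log_like

return -summed_log_likelihood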
assert self._is_setup, "You forgot to setup the sampler!"

loud = not quiet

self._update_free_parameters()

n_dim = len(list(self._free_parameters.keys()))

# Get starting point
p0 = self._get_starting_points(self._n_walkers)

# Deactivate memoization in astromodels, which is useless in this case since we
# will never use the same set of parameters twice
with use_astromodels_memoization(False):
    if using_mpi:
        with MPIPoolExecutor() as executor:
            sampler = zeus.sampler(
                logprob_fn=self.get_posterior,
                nwalkers=self._n_walkers,
                ndim=n_dim,
                pool=executor,
            )

            # if self._seed is not None:
            #     sampler._random.seed(self._seed)

    else:
        # Serial (non-MPI) construction of the same ensemble sampler
        sampler = zeus.sampler(
            logprob_fn=self.get_posterior,
            nwalkers=self._n_walkers,
            ndim=n_dim,
        )
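    # A hedged sketch of running the ensemble built above: run_mcmc and
    # get_chain follow zeus's emcee-like API, and the step count is a
    # placeholder.
    sampler.run_mcmc(p0, 500)
    chain = sampler.get_chain(flat=True)  # shape (n_steps * n_walkers, n_dim)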
sampler_class = dynesty.NestedSampler

self._update_free_parameters()

n_dim = len(self._free_parameters.keys())

sampling_procedure = sample_without_progress

# dynesty uses a different call signature for sampling,
# so we construct callbacks
loglike, dynesty_prior = self._construct_dynesty_posterior()

with use_astromodels_memoization(False):
    if threeML_config['parallel']['use-parallel']:
        c = ParallelClient()
        view = c[:]

        # Remap the map_sync onto a dynesty-compatible pool
        pool = DynestyPool(view)

        # We let the user set up the pool args
        dynesty_kwargs['pool'] = pool

    # Create the sampler
    self._sampler = sampler_class(loglike, dynesty_prior, ndim=n_dim, **dynesty_kwargs)
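    # A hedged sketch of running the nested sampler built above: run_nested,
    # .results, and .logz are standard dynesty API, and the dlogz tolerance is
    # a placeholder.
    self._sampler.run_nested(dlogz=0.5)

    results = self._sampler.results
    print(results.logz[-1])  # final log-evidence estimate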