# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Make sure we have a fit
assert (
self._jl_instance.results is not None
), "You have to perform a fit before using GoodnessOfFit"
if like_data_frame is None:
like_data_frame = self._jl_instance.results.get_statistic_frame()
# Restore best fit and store the reference value for the likelihood
self._jl_instance.restore_best_fit()
self._reference_like = like_data_frame["-log(likelihood)"]
# Store best model
self._best_fit_model = clone_model(self._jl_instance.likelihood_model)
def compute_TS(self, source_name, alt_hyp_mlike_df):
    """
    Computes the Likelihood Ratio Test statistic (TS) for the provided source

    :param source_name: name for the source
    :param alt_hyp_mlike_df: likelihood dataframe (it is the second output of the .fit() method)
    :return: a DataFrame containing the null hypothesis and the alternative hypothesis -log(likelihood) values and
    the value for TS for the source for each loaded dataset
    """
    # Fail early if the requested source is not part of the current model
    assert source_name in self._likelihood_model, (
        "Source %s is not in the current model" % source_name
    )
    # Clone model so the caller's likelihood model is never mutated
    model_clone = clone_model(self._likelihood_model)
    # Remove this source from the model (this clone becomes the null hypothesis)
    _ = model_clone.remove_source(source_name)
    # Fit the null-hypothesis model against the same data
    another_jl = JointLikelihood(model_clone, self._data_list)
    # Use the same minimizer as the parent object
    another_jl.set_minimizer(self.minimizer_in_use)
    # We do not need the covariance matrix, just the likelihood value
    _, null_hyp_mlike_df = another_jl.fit(
        quiet=True, compute_covariance=False, n_samples=1
    )
    # Compute TS for all datasets
    # NOTE(review): the actual TS computation and the return statement appear
    # to be truncated in this view of the file — as visible, the method ends
    # here without returning the documented DataFrame. Confirm against the
    # full source.
def __init__(self, optimized_model, samples, statistic_values, analysis_type, statistical_measures):
    """
    Store the results of a fit: best-fit model, parameter samples,
    statistic values and statistical measures.

    :param optimized_model: best-fit likelihood model; a clone is stored so
        that later changes to the original object do not affect this copy
    :param samples: 2-d array of parameter samples; its second axis must have
        one entry per free parameter of ``optimized_model``
    :param statistic_values: statistic values, stored as a ``pd.Series``
    :param analysis_type: type of the analysis (not used in the visible part
        of this initializer — presumably stored later; TODO confirm)
    :param statistical_measures: statistical measures, stored as a ``pd.Series``
    :raises AssertionError: if the number of sample columns does not match the
        number of free parameters in the model
    """
    # Safety checks
    self._n_free_parameters = len(optimized_model.free_parameters)

    # BUGFIX: the %-format arguments were swapped in the original message —
    # the "free parameters" slot printed samples.shape[1] and vice versa.
    assert samples.shape[1] == self._n_free_parameters, (
        "Number of free parameters (%s) and set of samples (%s) "
        "do not agree." % (self._n_free_parameters, samples.shape[1])
    )

    # NOTE: we clone the model so that whatever happens outside or after, this
    # copy of the model will not be changed
    self._optimized_model = astromodels.clone_model(optimized_model)

    # Save a transposed version of the samples for easier access
    self._samples_transposed = samples.T

    # Store likelihood values in a pandas Series
    self._optimal_statistic_values = pd.Series(statistic_values)

    # Store the statistical measures as a pandas Series
    self._statistical_measures = pd.Series(statistical_measures)

    # The .free_parameters property of the model is pretty costly because it
    # needs to update all the parameters to see if they are free. Since the
    # saved model will not be touched we can cache that
    self._free_parameters = self._optimized_model.free_parameters
def optimized_model(self):
    """
    Returns a copy of the optimized model

    :return: a copy of the optimized model
    """
    # Hand back a clone so callers can never mutate the stored best-fit model
    model_copy = astromodels.clone_model(self._optimized_model)

    return model_copy
# Apply the same selections as the current data set
if original_rebinner is not None:
# Apply rebinning, which also applies the mask
new_spectrum_plugin._apply_rebinner(original_rebinner)
else:
# Only apply the mask
new_spectrum_plugin._mask = original_mask
new_spectrum_plugin._apply_mask_to_original_vectors()
# We want to store the simulated parameters so that the user
# can recall them later
new_spectrum_plugin._simulation_storage = clone_model(self._like_model)
# TODO: nuisance parameters
return new_spectrum_plugin
def get_model(self, id):
    """
    Return a fresh clone of the stored best-fit model.

    Cloning guarantees that the fit never touches the original model and that
    every call restarts from the best fit instead of the last iteration.

    :param id: identifier of the dataset/iteration (unused here; kept for the
        expected interface)
    :return: a clone of the best-fit model
    """
    return clone_model(self._best_fit_model)