Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
'modifiers': [
{
'name': 'bkg_norm',
'type': 'histosys',
'data': {
'lo_data': source['bindata']['bkgsys_dn'],
'hi_data': source['bindata']['bkgsys_up'],
},
}
],
},
],
}
]
}
pdf = pyhf.Model(spec)
data = source['bindata']['data'] + pdf.config.auxdata
assert pytest.approx([-17.648827643136507], rel=5e-5) == pyhf.tensorlib.tolist(
pdf.logpdf(pdf.config.suggested_init(), data)
)
def test_optim_uncerts(backend, source, spec, mu):
    """Minimize twice-NLL for the model built from ``spec`` and check the fit runs.

    NOTE(review): the second ``minimize`` result is assigned but never asserted
    in this fragment — likely truncated from a longer test; confirm upstream.
    """
    model = pyhf.Model(spec)
    observations = source['bindata']['data'] + model.config.auxdata
    starting_pars = model.config.suggested_init()
    bounds = model.config.suggested_bounds()
    optimizer = pyhf.optimizer
    # First fit: unconstrained minimization of twice the negative log-likelihood.
    fit_result = optimizer.minimize(
        pyhf.infer.mle.twice_nll, observations, model, starting_pars, bounds
    )
    assert pyhf.tensorlib.tolist(fit_result)
    # Second fit with identical arguments (multi-line call in the original).
    fit_result = optimizer.minimize(
        pyhf.infer.mle.twice_nll, observations, model, starting_pars, bounds
    )
# Fragment (review): body of a hand-rolled "slow" constraint log-pdf; the
# enclosing ``def`` and the loop header that binds ``thisauxdata``/``paralphas``
# are not visible in this chunk.
# Poisson constraint term for the current slice of auxiliary data.
constraint_term = tensorlib.poisson_logpdf(thisauxdata, paralphas)
# Accumulate terms across iterations; ``summands`` starts out as None.
summands = (
constraint_term
if summands is None
else tensorlib.concatenate([summands, constraint_term])
)
# Total constraint log-pdf; 0 when no constraint terms were collected.
return tensorlib.sum(summands) if summands is not None else 0
def fast(self, auxdata, pars):
    """Delegate to the model's built-in constraint log-pdf implementation."""
    logpdf_value = self.constraint_logpdf(auxdata, pars)
    return logpdf_value
# Fragment (review): tail of a test comparing a hand-rolled ("slow") and the
# model's built-in ("fast") constraint log-pdf on model ``m``; the setup that
# builds ``m``, ``slow`` and ``fast`` is not visible in this chunk.
auxd = pyhf.tensorlib.astensor(m.config.auxdata)
pars = pyhf.tensorlib.astensor(m.config.suggested_init())
slow_result = pyhf.tensorlib.tolist(slow(m, auxd, pars))
fast_result = pyhf.tensorlib.tolist(fast(m, auxd, pars))
# Both implementations must agree within pytest.approx's default tolerance.
assert pytest.approx(slow_result) == fast_result
def test_optim_with_value(backend, source, spec, mu):
    """Fit freely, then refit with the POI fixed to ``mu`` while also
    requesting the objective value from the optimizer.

    NOTE(review): ``fitted_val`` is unpacked but never asserted in this
    fragment — presumably truncated; confirm against the full test.
    """
    model = pyhf.Model(spec)
    observations = source['bindata']['data'] + model.config.auxdata
    starting_pars = model.config.suggested_init()
    bounds = model.config.suggested_bounds()
    optimizer = pyhf.optimizer
    # Unconstrained fit first.
    best_fit = optimizer.minimize(
        pyhf.infer.mle.twice_nll, observations, model, starting_pars, bounds
    )
    assert pyhf.tensorlib.tolist(best_fit)
    # Constrained fit: POI pinned to mu; also return the fitted objective value.
    best_fit, fitted_val = optimizer.minimize(
        pyhf.infer.mle.twice_nll,
        observations,
        model,
        starting_pars,
        bounds,
        [(model.config.poi_index, mu)],
        return_fitted_val=True,
    )
    assert pyhf.tensorlib.tolist(best_fit)
# Fragment (review): compares the q_mu test statistic across tensor backends.
# ``backends``, ``data``, ``pdf``, ``tolerance``, ``tf`` and ``np`` come from
# the surrounding (unseen) test module.
test_statistic = []
for backend in backends:
    # TensorFlow v1-style backends need a fresh graph and session per run.
    if backend.name == 'tensorflow':
        tf.reset_default_graph()
        backend.session = tf.compat.v1.Session()
    pyhf.set_backend(backend)
    # Take the observed test statistic for mu = 1 from the returned tuple.
    q_mu = pyhf.infer.hypotest(
        1.0,
        data,
        pdf,
        pdf.config.suggested_init(),
        pdf.config.suggested_bounds(),
        return_test_statistics=True,
    )[-1][0]
    test_statistic.append(pyhf.tensorlib.tolist(q_mu))
# compare to NumPy/SciPy
test_statistic = np.array(test_statistic)
numpy_ratio = np.divide(test_statistic, test_statistic[0])
numpy_ratio_delta_unity = np.absolute(np.subtract(numpy_ratio, 1))
# compare tensor libraries to each other
tensors_ratio = np.divide(test_statistic[1], test_statistic[2])
tensors_ratio_delta_unity = np.absolute(np.subtract(tensors_ratio, 1))
try:
    assert (numpy_ratio_delta_unity < tolerance['numpy']).all()
except AssertionError:
    # BUG FIX: the original passed ``axis=0`` as a keyword to print(), which
    # is not a valid print() argument and would raise TypeError instead of
    # emitting the intended diagnostic message.
    print(
        'Ratio to NumPy+SciPy exceeded tolerance of {}: {}'.format(
            tolerance['numpy'], numpy_ratio_delta_unity.tolist()
        )
    )
# Fragment (review): body of a hand-rolled "slow" constraint log-pdf; the
# enclosing ``def`` and the loop that binds ``thisauxdata``/``paralphas`` are
# not visible in this chunk.
# Poisson constraint term for the current slice of auxiliary data.
constraint_term = tensorlib.poisson_logpdf(thisauxdata, paralphas)
# Accumulate terms across iterations; ``summands`` starts out as None.
summands = (
constraint_term
if summands is None
else tensorlib.concatenate([summands, constraint_term])
)
# Total constraint log-pdf; 0 when no constraint terms were collected.
return tensorlib.sum(summands) if summands is not None else 0
def fast(self, auxdata, pars):
    """Thin wrapper around the model's own constraint log-pdf."""
    value = self.constraint_logpdf(auxdata, pars)
    return value
# Fragment (review): tail of a test comparing a hand-rolled ("slow") and the
# model's built-in ("fast") constraint log-pdf on model ``m``; the setup that
# builds ``m``, ``slow`` and ``fast`` is not visible in this chunk.
auxd = pyhf.tensorlib.astensor(m.config.auxdata)
pars = pyhf.tensorlib.astensor(m.config.suggested_init())
slow_result = pyhf.tensorlib.tolist(slow(m, auxd, pars))
fast_result = pyhf.tensorlib.tolist(fast(m, auxd, pars))
# Both implementations must agree within pytest.approx's default tolerance.
assert pytest.approx(slow_result) == fast_result
# Fragment (review): assertions on a model built outside this chunk;
# ``pdf``, ``data`` and ``np`` come from the surrounding (unseen) test.
# Samples 1 and 2 each carry a lumi modifier as their first modifier.
assert pdf.spec['channels'][0]['samples'][1]['modifiers'][0]['type'] == 'lumi'
assert pdf.spec['channels'][0]['samples'][2]['modifiers'][0]['type'] == 'lumi'
# Sample 2's second modifier: staterror with per-bin uncertainty data.
assert pdf.spec['channels'][0]['samples'][2]['modifiers'][1]['type'] == 'staterror'
assert pdf.spec['channels'][0]['samples'][2]['modifiers'][1]['data'] == [0, 10.0]
# Sample 1's staterror data is float-valued, so compare with np.isclose.
assert pdf.spec['channels'][0]['samples'][1]['modifiers'][1]['type'] == 'staterror'
assert all(
np.isclose(
pdf.spec['channels'][0]['samples'][1]['modifiers'][1]['data'], [5.0, 0.0]
)
)
# Expected actual (non-auxiliary) data at the suggested initial parameters.
assert pdf.expected_actualdata(
pyhf.tensorlib.astensor(pdf.config.suggested_init())
).tolist() == [120.0, 110.0]
# Auxiliary data ordering is the sorted list of constrained parameter names.
assert pdf.config.auxdata_order == sorted(
['lumi', 'syst1', 'staterror_channel1', 'syst2', 'syst3']
)
assert data == [122.0, 112.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]
# Doubling the signal strength doubles the signal contribution: [140, 120].
pars = pdf.config.suggested_init()
pars[pdf.config.par_slice('SigXsecOverSM')] = [2.0]
assert pdf.expected_data(pars, include_auxdata=False).tolist() == [140, 120]
def test_optim_uncerts(backend, source, spec, mu):
    """Fit freely, then refit with the POI fixed to ``mu`` while requesting
    per-parameter uncertainties from the optimizer."""
    model = pyhf.Model(spec)
    observations = source['bindata']['data'] + model.config.auxdata
    starting_pars = model.config.suggested_init()
    bounds = model.config.suggested_bounds()
    optimizer = pyhf.optimizer
    # Unconstrained fit first.
    free_fit = optimizer.minimize(
        pyhf.infer.mle.twice_nll, observations, model, starting_pars, bounds
    )
    assert pyhf.tensorlib.tolist(free_fit)
    # Constrained refit: POI pinned to mu, return (value, uncertainty) pairs.
    fit_with_errors = optimizer.minimize(
        pyhf.infer.mle.twice_nll,
        observations,
        model,
        starting_pars,
        bounds,
        [(model.config.poi_index, mu)],
        return_uncertainties=True,
    )
    # Each parameter row is a (best-fit value, uncertainty) pair.
    assert fit_with_errors.shape[1] == 2
    assert pyhf.tensorlib.tolist(fit_with_errors)
def validate_hypotest(pdf, data, mu_test, expected_result, tolerance=1e-6):
    """Run ``pyhf.infer.hypotest`` at ``mu_test`` and check the observed and
    expected CLs values against ``expected_result``.

    Agreement is checked as a relative deviation: |result - expected| /
    expected must be below ``tolerance`` for the observed value and for each
    entry of the expected band.
    """
    CLs_obs, CLs_exp_set = pyhf.infer.hypotest(
        mu_test,
        data,
        pdf,
        pdf.config.suggested_init(),
        pdf.config.suggested_bounds(),
        return_expected_set=True,
        qtilde=False,
    )
    # Observed CLs against its reference value.
    assert abs(CLs_obs - expected_result['obs']) / expected_result['obs'] < tolerance
    # Each point of the expected CLs band against its reference value.
    for computed, reference in zip(CLs_exp_set, expected_result['exp']):
        assert abs(computed - reference) / reference < tolerance
def hypotest(pdf, data):
    """Run hypotest for mu = 1 with every optional return flag enabled."""
    extra_returns = dict(
        return_tail_probs=True,
        return_expected=True,
        return_expected_set=True,
        return_test_statistics=True,
    )
    return pyhf.infer.hypotest(
        1.0,
        data,
        pdf,
        pdf.config.suggested_init(),
        pdf.config.suggested_bounds(),
        **extra_returns,
    )