From 6758acdb9a78fd00df8f7cecc222227ba70269e8 Mon Sep 17 00:00:00 2001
From: Alexander Held
Date: Wed, 7 Sep 2022 19:08:53 +0200
Subject: [PATCH 1/2] support customizable POI name for significance

---
 src/cabinetry/fit/__init__.py | 18 +++++++++++++++++-
 tests/fit/test_fit.py         | 21 +++++++++++++++++++--
 2 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/src/cabinetry/fit/__init__.py b/src/cabinetry/fit/__init__.py
index bafa0b46..2c1cee6e 100644
--- a/src/cabinetry/fit/__init__.py
+++ b/src/cabinetry/fit/__init__.py
@@ -1040,6 +1040,7 @@ def significance(
     model: pyhf.pdf.Model,
     data: List[float],
     *,
+    poi_name: Optional[str] = None,
     init_pars: Optional[List[float]] = None,
     fix_pars: Optional[List[bool]] = None,
     par_bounds: Optional[List[Tuple[float, float]]] = None,
@@ -1054,6 +1055,8 @@ def significance(
     Args:
         model (pyhf.pdf.Model): model to use in fits
         data (List[float]): data (including auxdata) the model is fit to
+        poi_name (Optional[str], optional): significance is calculated for this
+            parameter, defaults to None (use POI specified in workspace)
         init_pars (Optional[List[float]], optional): list of initial parameter
             settings, defaults to None (use ``pyhf`` suggested inits)
         fix_pars (Optional[List[bool]], optional): list of booleans specifying which
@@ -1080,7 +1083,17 @@ def significance(
         ),
     )
 
-    log.info("calculating discovery significance")
+    # use POI given by kwarg, fall back to POI specified in model
+    poi_index = model_utils._poi_index(model, poi_name=poi_name)
+    if poi_index is None:
+        raise ValueError("no POI specified, cannot calculate significance")
+
+    # set POI name in model config to desired value, hypotest will pick this up
+    # save original value to reset model later
+    original_model_poi_name = model.config.poi_name
+    model.config.set_poi(model.config.par_names()[poi_index])
+
+    log.info(f"calculating discovery significance for {model.config.poi_name}")
     obs_p_val, exp_p_val = pyhf.infer.hypotest(
         0.0,
         data,
@@ -1096,6 +1109,9 @@ def significance(
     obs_significance = scipy.stats.norm.isf(obs_p_val, 0, 1)
     exp_significance = scipy.stats.norm.isf(exp_p_val, 0, 1)
 
+    # set POI in model back to original values
+    model.config.set_poi(original_model_poi_name)
+
     if obs_p_val >= 1e-3:
         log.info(f"observed p-value: {obs_p_val:.3%}")
     else:

diff --git a/tests/fit/test_fit.py b/tests/fit/test_fit.py
index d9fd8913..b2eb07dd 100644
--- a/tests/fit/test_fit.py
+++ b/tests/fit/test_fit.py
@@ -863,13 +863,18 @@ def test_significance(example_spec_with_background):
     # reduce signal for larger expected p-value
     example_spec_with_background["channels"][0]["samples"][0]["data"] = [30]
 
-    # Asimov dataset, observed = expected
+    # Asimov dataset, observed = expected, POI removed from measurement config
+    example_spec_with_background["measurements"][0]["config"]["poi"] = ""
     model, data = model_utils.model_and_data(example_spec_with_background, asimov=True)
-    significance_results = fit.significance(model, data)
+    assert model.config.poi_index is None  # no POI set before calculation
+    assert model.config.poi_name is None
+    significance_results = fit.significance(model, data, poi_name="Signal strength")
     assert np.allclose(significance_results.observed_p_value, 0.02062714)
     assert np.allclose(significance_results.observed_significance, 2.04096523)
     assert np.allclose(significance_results.expected_p_value, 0.02062714)
     assert np.allclose(significance_results.expected_significance, 2.04096523)
+    assert model.config.poi_index is None  # model config is preserved
+    assert model.config.poi_name is None
 
     # init/fixed pars, par bounds
     model, data = model_utils.model_and_data(example_spec_with_background)
@@ -877,6 +882,7 @@ def test_significance(example_spec_with_background):
     fit.significance(
         model,
         data,
+        poi_name="Signal strength",
        init_pars=[0.9, 1.0],
         fix_pars=[False, True],
         par_bounds=[(0, 5), (0.1, 10.0)],
@@ -894,6 +900,17 @@ def test_significance(example_spec_with_background):
         )
     ]
 
+    # no POI specified anywhere
+    with pytest.raises(
+        ValueError, match="no POI specified, cannot calculate significance"
+    ):
+        fit.significance(model, data)
+
+    # add POI back to model and reset backend for testing optimizer customization
+    example_spec_with_background["measurements"][0]["config"]["poi"] = "Signal strength"
+    model, data = model_utils.model_and_data(example_spec_with_background)
+    pyhf.set_backend("numpy", "scipy")
+
     # default strategy/maxiter/tolerance
     with mock.patch("pyhf.set_backend") as mock_backend:
         fit.significance(model, data)

From 6a968530e68927ad9662d559e99a53101aaad918 Mon Sep 17 00:00:00 2001
From: Alexander Held
Date: Wed, 7 Sep 2022 19:09:05 +0200
Subject: [PATCH 2/2] empty commit
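
A minimal usage sketch of the new keyword argument, for context: it assumes pyhf
and cabinetry are importable and builds a toy counting model with pyhf's
simplemodels helper; the observed count and the parameter name "mu" are
illustrative and not taken from this patch.

    import pyhf
    from cabinetry import fit

    # toy single-bin counting model; pyhf's helper declares "mu" as its POI
    model = pyhf.simplemodels.uncorrelated_background(
        signal=[10.0], bkg=[50.0], bkg_uncertainty=[5.0]
    )
    data = [65.0] + model.config.auxdata  # main observation plus auxiliary data

    # discovery significance for an explicitly named parameter; omitting
    # poi_name falls back to the POI in the model config, and a ValueError
    # is raised if neither specifies one
    results = fit.significance(model, data, poi_name="mu")
    print(f"observed significance: {results.observed_significance:.2f} sigma")
    print(f"expected significance: {results.expected_significance:.2f} sigma")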