From d0c6dd2d4341ebb6e39dceccea3525e994b07955 Mon Sep 17 00:00:00 2001 From: Mathis Frahm Date: Wed, 14 Aug 2024 13:21:14 +0200 Subject: [PATCH 1/6] update get_das_info to include empty and broken files --- scripts/get_das_info.py | 94 +++++++++++++++++++++++++++++++++-------- 1 file changed, 76 insertions(+), 18 deletions(-) diff --git a/scripts/get_das_info.py b/scripts/get_das_info.py index ac7ebe74..a529406d 100644 --- a/scripts/get_das_info.py +++ b/scripts/get_das_info.py @@ -38,6 +38,10 @@ def convert_default(data: dict, placeholder="PLACEHOLDER") -> str: keys=[ "{data['name']}", # noqa ], + aux={{ + "broken_files": {data['broken_files']}, + "empty_files": {data['empty_files']}, + }} n_files={data['nfiles']}, n_events={data['nevents']}, ) @@ -180,13 +184,11 @@ def convert_minimal(data: dict) -> str: } -def get_das_info( - dataset: str, -) -> dict: +def load_das_info(dataset: str, add_file_info: bool = False) -> dict: from law.util import interruptable_popen # call dasgoclient command - cmd = f"dasgoclient -query='dataset={dataset}' -json" + cmd = f"dasgoclient -query='{'file ' if add_file_info else ''}dataset={dataset}' -json" code, out, _ = interruptable_popen( cmd, shell=True, @@ -196,9 +198,24 @@ def get_das_info( if code != 0: raise Exception(f"dasgoclient query failed:\n{out}") infos = json.loads(out) + + return infos + + +def get_das_info( + dataset: str, + add_file_info: bool = False, +) -> dict: + infos = load_das_info(dataset, add_file_info=False) + info_of_interest = {"name": dataset} for info in infos: dataset_info = info["dataset"][0] + if "files_via_dataset" in info["das"]["services"][0]: + print("should not be called") + empty_files = list(filter(lambda x: x["file"][0]["nevents"] == 0, info)) + broken_files = list(filter(lambda x: x["file"][0]["is_file_valid"] == 0, info)) + # Get json format of single das_string gives multiple dictornaries with different info # Avoid to print multiple infos twice and ask specificly for the kew of interest if "dataset_info" in info["das"]["services"][0]: @@ -207,6 +224,58 @@ def get_das_info( info_of_interest["nfiles"] = dataset_info.get("nfiles", "") info_of_interest["nevents"] = dataset_info.get("nevents", "") + if add_file_info: + file_infos = load_das_info(dataset, add_file_info=True) + + empty_files = [ + info["file"][0]["name"] + for info in filter(lambda info: info["file"][0]["nevents"] == 0, file_infos) + ] + broken_files = [ + info["file"][0]["name"] + for info in filter(lambda info: info["file"][0]["is_file_valid"] == 0, file_infos) + ] + info_of_interest["empty_files"] = empty_files + info_of_interest["broken_files"] = broken_files + else: + info_of_interest["empty_files"] = "UNKNOWN" + info_of_interest["broken_files"] = "UNKNOWN" + + return info_of_interest + + +def new_get_das_info(dataset: str) -> dict: + info_of_interest = {"name": dataset} + + file_infos = load_das_info(dataset, add_file_info=True) + + info_of_interest["dataset_id"] = file_infos[0]["file"][0]["dataset_id"] + + empty_files_filter = lambda info: info["file"][0]["nevents"] == 0 + broken_files_filter = lambda info: info["file"][0]["is_file_valid"] == 0 + + good_files = list(filter(lambda x: not broken_files_filter(x) and not empty_files_filter(x), file_infos)) + + dataset_id = {info["file"][0]["dataset_id"] for info in good_files} + if len(dataset_id) == 1: + info_of_interest["dataset_id"] = dataset_id.pop() + else: + raise ValueError(f"Multiple dataset IDs ({dataset_id}) found for dataset {dataset}") + + info_of_interest["nfiles"] = len(good_files) + 
info_of_interest["nevents"] = sum(info["file"][0]["nevents"] for info in good_files) + + empty_files = [ + info["file"][0]["name"] + for info in filter(empty_files_filter, file_infos) + ] + broken_files = [ + info["file"][0]["name"] + for info in filter(broken_files_filter, file_infos) + ] + info_of_interest["empty_files"] = empty_files + info_of_interest["broken_files"] = broken_files + return info_of_interest @@ -215,8 +284,6 @@ def print_das_info( keys_of_interest: tuple | None = None, convert_function_str: str | None = None, ): - from law.util import interruptable_popen - # get the requested convert function convert_function = convert_functions[convert_function_str] @@ -224,7 +291,7 @@ def print_das_info( # set default keys of interest # NOTE: this attribute is currently not used keys_of_interest = keys_of_interest or ( - "name", "dataset_id", "nfiles", "nevents", + "name", "dataset_id", "nfiles", "nevents", "empty_files", "broken_files", ) wildcard = "*" in das_string @@ -234,22 +301,13 @@ def print_das_info( datasets.append(das_string) else: # using a wildcard leads to a different structer in json format - cmd = f"dasgoclient -query='dataset={das_string}' -json" - code, out, _ = interruptable_popen( - cmd, - shell=True, - stdout=subprocess.PIPE, - executable="/bin/bash", - ) - if code != 0: - raise Exception(f"dasgoclient query failed:\n{out}") - infos = json.loads(out) + infos = load_das_info(das_string, add_file_info=False) for info in infos: dataset_name = info.get("dataset", [])[0].get("name", "") datasets.append(dataset_name) for dataset in datasets: - info_of_interest = get_das_info(dataset) + info_of_interest = new_get_das_info(dataset) desired_output = convert_function(info_of_interest) print(desired_output) From f2350e1486d7331e3004613f1cd0bd88f4c9388a Mon Sep 17 00:00:00 2001 From: Mathis Frahm Date: Sat, 31 Aug 2024 14:32:04 +0200 Subject: [PATCH 2/6] improve verbosity of broken files --- scripts/get_das_info.py | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/scripts/get_das_info.py b/scripts/get_das_info.py index a529406d..102d61ef 100644 --- a/scripts/get_das_info.py +++ b/scripts/get_das_info.py @@ -26,11 +26,33 @@ def get_generator_name(name: str) -> str: return "" +def get_broken_files_str(data: dict, n_spaces: int = 12) -> str: + """ + Function that returns a string represenatation of broken files + """ + + broken_files_list = [ + f'"{d}", # broken' for d in data["broken_files"] + ] + [ + f'"{d}", # empty' for d in data["empty_files"] if d not in data["broken_files"] + ] + + if not broken_files_list: + return "" + else: + return ( + f"\n{' '* n_spaces}" + + f"\n{' '* n_spaces}".join(broken_files_list) + + f"\n{' '* (n_spaces - 4)}" + ) + + def convert_default(data: dict, placeholder="PLACEHOLDER") -> str: """ Function that converts dataset info into one order Dataset per query """ generator = get_generator_name(data["name"]) + return f"""cpn.add_dataset( name="{placeholder}{generator}", id={data['dataset_id']}, @@ -39,10 +61,9 @@ def convert_default(data: dict, placeholder="PLACEHOLDER") -> str: "{data['name']}", # noqa ], aux={{ - "broken_files": {data['broken_files']}, - "empty_files": {data['empty_files']}, - }} - n_files={data['nfiles']}, + "broken_files": [{get_broken_files_str(data)}], + }}, + n_files={data['nfiles_good']}, # {data["nfiles"]}-{data["nfiles_bad"]} n_events={data['nevents']}, ) """ @@ -262,7 +283,8 @@ def new_get_das_info(dataset: str) -> dict: else: raise ValueError(f"Multiple dataset IDs 
({dataset_id}) found for dataset {dataset}") - info_of_interest["nfiles"] = len(good_files) + info_of_interest["nfiles"] = len(file_infos) + info_of_interest["nfiles_good"] = len(good_files) info_of_interest["nevents"] = sum(info["file"][0]["nevents"] for info in good_files) empty_files = [ @@ -276,6 +298,8 @@ def new_get_das_info(dataset: str) -> dict: info_of_interest["empty_files"] = empty_files info_of_interest["broken_files"] = broken_files + info_of_interest["nfiles_bad"] = len(set(empty_files + broken_files)) + return info_of_interest From af30edd77d359115e867d0006f3d2fbbf3fa0d82 Mon Sep 17 00:00:00 2001 From: Mathis Frahm Date: Mon, 14 Oct 2024 11:56:07 +0200 Subject: [PATCH 3/6] add broken files info to all convert functions --- scripts/get_das_info.py | 59 +++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 35 deletions(-) diff --git a/scripts/get_das_info.py b/scripts/get_das_info.py index 102d61ef..5afd5599 100644 --- a/scripts/get_das_info.py +++ b/scripts/get_das_info.py @@ -26,15 +26,15 @@ def get_generator_name(name: str) -> str: return "" -def get_broken_files_str(data: dict, n_spaces: int = 12) -> str: +def get_broken_files_str(data: dict, n_spaces: int = 20) -> str: """ Function that returns a string represenatation of broken files """ broken_files_list = [ - f'"{d}", # broken' for d in data["broken_files"] + f'"{d}", # broken # noqa: E501' for d in data["broken_files"] ] + [ - f'"{d}", # empty' for d in data["empty_files"] if d not in data["broken_files"] + f'"{d}", # empty # noqa: E501' for d in data["empty_files"] if d not in data["broken_files"] ] if not broken_files_list: @@ -53,28 +53,6 @@ def convert_default(data: dict, placeholder="PLACEHOLDER") -> str: """ generator = get_generator_name(data["name"]) - return f"""cpn.add_dataset( - name="{placeholder}{generator}", - id={data['dataset_id']}, - processes=[procs.{placeholder}], - keys=[ - "{data['name']}", # noqa - ], - aux={{ - "broken_files": [{get_broken_files_str(data)}], - }}, - n_files={data['nfiles_good']}, # {data["nfiles"]}-{data["nfiles_bad"]} - n_events={data['nevents']}, -) -""" - - -def convert_variation(data: dict, placeholder="PLACEHOLDER") -> str: - """ - Function that converts dataset info into one order Dataset per query. Stores the dataset info - in a dict with the dataset type as key. 
- """ - generator = get_generator_name(data["name"]) return f"""cpn.add_dataset( name="{placeholder}{generator}", id={data['dataset_id']}, @@ -82,9 +60,12 @@ def convert_variation(data: dict, placeholder="PLACEHOLDER") -> str: info=dict( nominal=DatasetInfo( keys=[ - "{data['name']}", # noqa + "{data['name']}", # noqa: E501 ], - n_files={data['nfiles']}, + aux={{ + "broken_files": [{get_broken_files_str(data)}], + }}, + n_files={data['nfiles_good']}, # {data["nfiles"]}-{data["nfiles_bad"]} n_events={data['nevents']}, ), ), @@ -153,9 +134,12 @@ def convert_top(data: dict, placeholder="PLACEHOLDER") -> str: info=dict( nominal=DatasetInfo( keys=[ - "{data['name']}", # noqa + "{data['name']}", # noqa: E501 ], - n_files={data['nfiles']}, + aux={{ + "broken_files": [{get_broken_files_str(data)}], + }}, + n_files={data['nfiles_good']}, # {data["nfiles"]}-{data["nfiles_bad"]} n_events={data['nevents']}, ), ), @@ -164,9 +148,12 @@ def convert_top(data: dict, placeholder="PLACEHOLDER") -> str: # comment out this dataset return f""" # {identifier}=DatasetInfo( # keys=[ - # "{data['name']}", # noqa + # "{data['name']}", # noqa: E501 # ], - # n_files={data['nfiles']}, + # aux={{ + # "broken_files": [{get_broken_files_str(data)}], + # }}, + # n_files={data['nfiles_good']}, # {data["nfiles"]}-{data["nfiles_bad"]} # n_events={data['nevents']}, # ),""" elif dataset_type == "ignore": @@ -175,9 +162,12 @@ def convert_top(data: dict, placeholder="PLACEHOLDER") -> str: # some known variation of the dataset return f""" {dataset_type}=DatasetInfo( keys=[ - "{data['name']}", # noqa + "{data['name']}", # noqa: E501 ], - n_files={data['nfiles']}, + aux={{ + "broken_files": [{get_broken_files_str(data)}], + }}, + n_files={data['nfiles_good']}, # {data["nfiles"]}-{data["nfiles_bad"]} n_events={data['nevents']}, ),""" @@ -193,12 +183,11 @@ def convert_minimal(data: dict) -> str: """ Function that only returns the dataset key + number of events. 
""" - return f"""{data['name']}\nFiles: {data['nfiles']}\nEvents: {data['nevents']}\n""" + return f"""{data['name']}\nFiles: {data['nfiles_good']}\nEvents: {data['nevents']}\n""" convert_functions = { "default": convert_default, - "variation": convert_variation, "keys": convert_keys, "top": convert_top, "minimal": convert_minimal, From 487e3adfce456f72069cc1584f176a83198050e5 Mon Sep 17 00:00:00 2001 From: Mathis Frahm Date: Mon, 14 Oct 2024 13:17:48 +0200 Subject: [PATCH 4/6] remove previous get_das_info function --- scripts/get_das_info.py | 46 ++--------------------------------------- 1 file changed, 2 insertions(+), 44 deletions(-) diff --git a/scripts/get_das_info.py b/scripts/get_das_info.py index 5afd5599..7733db2b 100644 --- a/scripts/get_das_info.py +++ b/scripts/get_das_info.py @@ -212,49 +212,7 @@ def load_das_info(dataset: str, add_file_info: bool = False) -> dict: return infos -def get_das_info( - dataset: str, - add_file_info: bool = False, -) -> dict: - infos = load_das_info(dataset, add_file_info=False) - - info_of_interest = {"name": dataset} - for info in infos: - dataset_info = info["dataset"][0] - if "files_via_dataset" in info["das"]["services"][0]: - print("should not be called") - empty_files = list(filter(lambda x: x["file"][0]["nevents"] == 0, info)) - broken_files = list(filter(lambda x: x["file"][0]["is_file_valid"] == 0, info)) - - # Get json format of single das_string gives multiple dictornaries with different info - # Avoid to print multiple infos twice and ask specificly for the kew of interest - if "dataset_info" in info["das"]["services"][0]: - info_of_interest["dataset_id"] = dataset_info.get("dataset_id", "") - elif "filesummaries" in info["das"]["services"][0]: - info_of_interest["nfiles"] = dataset_info.get("nfiles", "") - info_of_interest["nevents"] = dataset_info.get("nevents", "") - - if add_file_info: - file_infos = load_das_info(dataset, add_file_info=True) - - empty_files = [ - info["file"][0]["name"] - for info in filter(lambda info: info["file"][0]["nevents"] == 0, file_infos) - ] - broken_files = [ - info["file"][0]["name"] - for info in filter(lambda info: info["file"][0]["is_file_valid"] == 0, file_infos) - ] - info_of_interest["empty_files"] = empty_files - info_of_interest["broken_files"] = broken_files - else: - info_of_interest["empty_files"] = "UNKNOWN" - info_of_interest["broken_files"] = "UNKNOWN" - - return info_of_interest - - -def new_get_das_info(dataset: str) -> dict: +def get_das_info(dataset: str) -> dict: info_of_interest = {"name": dataset} file_infos = load_das_info(dataset, add_file_info=True) @@ -320,7 +278,7 @@ def print_das_info( datasets.append(dataset_name) for dataset in datasets: - info_of_interest = new_get_das_info(dataset) + info_of_interest = get_das_info(dataset) desired_output = convert_function(info_of_interest) print(desired_output) From 4fc4beca87b8eeb667130fede88427ed25179017 Mon Sep 17 00:00:00 2001 From: Mathis Frahm Date: Mon, 14 Oct 2024 13:18:24 +0200 Subject: [PATCH 5/6] add broken files aux entries to MuonEG in 2022 --- .../run3_2022_postEE_nano_v12/data.py | 70 +++++++++------ .../run3_2022_preEE_nano_v12/data.py | 90 +++++++++++-------- 2 files changed, 97 insertions(+), 63 deletions(-) diff --git a/cmsdb/campaigns/run3_2022_postEE_nano_v12/data.py b/cmsdb/campaigns/run3_2022_postEE_nano_v12/data.py index e343dcb6..35f3fd1c 100644 --- a/cmsdb/campaigns/run3_2022_postEE_nano_v12/data.py +++ b/cmsdb/campaigns/run3_2022_postEE_nano_v12/data.py @@ -4,6 +4,8 @@ CMS datasets from the 2022 post-EE 
data-taking campaign """ +from order import DatasetInfo + import cmsdb.processes as procs from cmsdb.campaigns.run3_2022_postEE_nano_v12 import campaign_run3_2022_postEE_nano_v12 as cpn @@ -118,44 +120,58 @@ cpn.add_dataset( name="data_muoneg_e", id=14783435, - is_data=True, processes=[procs.data_muoneg], - keys=[ - "/MuonEG/Run2022E-22Sep2023-v1/NANOAOD", # noqa - ], - n_files=29, - n_events=12873327, - aux={ - "era": "E", - }, + info=dict( + nominal=DatasetInfo( + keys=[ + "/MuonEG/Run2022E-22Sep2023-v1/NANOAOD", # noqa: E501 + ], + aux={ + "broken_files": [], + }, + n_files=29, # 29-0 + n_events=12873327, + ), + ), ) cpn.add_dataset( name="data_muoneg_f", id=14784482, - is_data=True, processes=[procs.data_muoneg], - keys=[ - "/MuonEG/Run2022F-22Sep2023-v1/NANOAOD", # noqa - ], - n_files=95, - n_events=38219969, - aux={ - "era": "F", - }, + info=dict( + nominal=DatasetInfo( + keys=[ + "/MuonEG/Run2022F-22Sep2023-v1/NANOAOD", # noqa: E501 + ], + aux={ + "broken_files": [ + "/store/data/Run2022F/MuonEG/NANOAOD/22Sep2023-v1/50000/4d76213a-ef14-411a-9558-559a6df3f978.root", # empty # noqa: E501 + "/store/data/Run2022F/MuonEG/NANOAOD/22Sep2023-v1/50000/4fb72196-3b02-4499-8f6c-a54e15692b32.root", # empty # noqa: E501 + ], + }, + n_files=93, # 95-2 + n_events=38219969, + ), + ), ) cpn.add_dataset( name="data_muoneg_g", id=14784485, - is_data=True, processes=[procs.data_muoneg], - keys=[ - "/MuonEG/Run2022G-22Sep2023-v1/NANOAOD", # noqa - ], - n_files=27, - n_events=6238527, - aux={ - "era": "G", - }, + info=dict( + nominal=DatasetInfo( + keys=[ + "/MuonEG/Run2022G-22Sep2023-v1/NANOAOD", # noqa: E501 + ], + aux={ + "broken_files": [ + "/store/data/Run2022G/MuonEG/NANOAOD/22Sep2023-v1/2520000/cd404eb6-8218-4787-b5ed-af6cd9fe3750.root", # empty # noqa: E501 + ], + }, + n_files=26, # 27-1 + n_events=6238527, + ), + ), ) diff --git a/cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py b/cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py index 8e0c414f..492d2746 100644 --- a/cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py +++ b/cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py @@ -4,6 +4,8 @@ CMS datasets from the 2022 pre-EE data-taking campaign """ +from order import DatasetInfo + import cmsdb.processes as procs from cmsdb.campaigns.run3_2022_preEE_nano_v12 import campaign_run3_2022_preEE_nano_v12 as cpn @@ -118,59 +120,75 @@ cpn.add_dataset( name="data_muoneg_a", id=14783289, - is_data=True, processes=[procs.data_muoneg], - keys=[ - "/MuonEG/Run2022A-22Sep2023-v1/NANOAOD", # noqa - ], - n_files=5, - n_events=12, - aux={ - "era": "A", - }, + info=dict( + nominal=DatasetInfo( + keys=[ + "/MuonEG/Run2022A-22Sep2023-v1/NANOAOD", # noqa: E501 + ], + aux={ + "broken_files": [ + "/store/data/Run2022A/MuonEG/NANOAOD/22Sep2023-v1/50000/9a127bdb-9522-4f49-b754-67bb9152c0b3.root", # empty # noqa: E501 + ], + }, + n_files=4, # 5-1 + n_events=12, + ), + ), ) cpn.add_dataset( name="data_muoneg_b", id=14784076, - is_data=True, processes=[procs.data_muoneg], - keys=[ - "/MuonEG/Run2022B-22Sep2023-v1/NANOAOD", # noqa - ], - n_files=7, - n_events=254803, - aux={ - "era": "B", - }, + info=dict( + nominal=DatasetInfo( + keys=[ + "/MuonEG/Run2022B-22Sep2023-v1/NANOAOD", # noqa: E501 + ], + aux={ + "broken_files": [ + "/store/data/Run2022B/MuonEG/NANOAOD/22Sep2023-v1/50000/947809ff-822e-4a3a-84a2-d3fe84fc2573.root", # empty # noqa: E501 + ], + }, + n_files=6, # 7-1 + n_events=254803, + ), + ), ) cpn.add_dataset( name="data_muoneg_c", id=14784125, - is_data=True, processes=[procs.data_muoneg], - keys=[ - 
"/MuonEG/Run2022C-22Sep2023-v1/NANOAOD", # noqa - ], - n_files=28, - n_events=15768439, - aux={ - "era": "C", - }, + info=dict( + nominal=DatasetInfo( + keys=[ + "/MuonEG/Run2022C-22Sep2023-v1/NANOAOD", # noqa: E501 + ], + aux={ + "broken_files": [], + }, + n_files=28, # 28-0 + n_events=15768439, + ), + ), ) cpn.add_dataset( name="data_muoneg_d", id=14784209, - is_data=True, processes=[procs.data_muoneg], - keys=[ - "/MuonEG/Run2022D-22Sep2023-v1/NANOAOD", # noqa - ], - n_files=16, - n_events=8007031, - aux={ - "era": "D", - }, + info=dict( + nominal=DatasetInfo( + keys=[ + "/MuonEG/Run2022D-22Sep2023-v1/NANOAOD", # noqa: E501 + ], + aux={ + "broken_files": [], + }, + n_files=16, # 16-0 + n_events=8007031, + ), + ), ) From de20331329a779b69a821878b1649588902a718f Mon Sep 17 00:00:00 2001 From: Mathis Frahm Date: Mon, 14 Oct 2024 13:22:22 +0200 Subject: [PATCH 6/6] add is_data and era info back to muoneg datasets --- .../campaigns/run3_2022_postEE_nano_v12/data.py | 12 ++++++++++++ cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py | 16 ++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/cmsdb/campaigns/run3_2022_postEE_nano_v12/data.py b/cmsdb/campaigns/run3_2022_postEE_nano_v12/data.py index 35f3fd1c..63d3efe8 100644 --- a/cmsdb/campaigns/run3_2022_postEE_nano_v12/data.py +++ b/cmsdb/campaigns/run3_2022_postEE_nano_v12/data.py @@ -120,6 +120,7 @@ cpn.add_dataset( name="data_muoneg_e", id=14783435, + is_data=True, processes=[procs.data_muoneg], info=dict( nominal=DatasetInfo( @@ -133,11 +134,15 @@ n_events=12873327, ), ), + aux={ + "era": "E", + }, ) cpn.add_dataset( name="data_muoneg_f", id=14784482, + is_data=True, processes=[procs.data_muoneg], info=dict( nominal=DatasetInfo( @@ -154,11 +159,15 @@ n_events=38219969, ), ), + aux={ + "era": "F", + }, ) cpn.add_dataset( name="data_muoneg_g", id=14784485, + is_data=True, processes=[procs.data_muoneg], info=dict( nominal=DatasetInfo( @@ -174,4 +183,7 @@ n_events=6238527, ), ), + aux={ + "era": "G", + }, ) diff --git a/cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py b/cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py index 492d2746..b87b07dd 100644 --- a/cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py +++ b/cmsdb/campaigns/run3_2022_preEE_nano_v12/data.py @@ -120,6 +120,7 @@ cpn.add_dataset( name="data_muoneg_a", id=14783289, + is_data=True, processes=[procs.data_muoneg], info=dict( nominal=DatasetInfo( @@ -135,11 +136,15 @@ n_events=12, ), ), + aux={ + "era": "A", + }, ) cpn.add_dataset( name="data_muoneg_b", id=14784076, + is_data=True, processes=[procs.data_muoneg], info=dict( nominal=DatasetInfo( @@ -155,11 +160,15 @@ n_events=254803, ), ), + aux={ + "era": "B", + }, ) cpn.add_dataset( name="data_muoneg_c", id=14784125, + is_data=True, processes=[procs.data_muoneg], info=dict( nominal=DatasetInfo( @@ -173,11 +182,15 @@ n_events=15768439, ), ), + aux={ + "era": "C", + }, ) cpn.add_dataset( name="data_muoneg_d", id=14784209, + is_data=True, processes=[procs.data_muoneg], info=dict( nominal=DatasetInfo( @@ -191,4 +204,7 @@ n_events=8007031, ), ), + aux={ + "era": "D", + }, )