diff --git a/src/exdf/data_reduction/method.py b/src/exdf/data_reduction/method.py
index 1994c54b69db52578ea48d494d4a2052b145c0e8..436c8131572d8065c26b6b7ad6f2d9e3a9afa1af 100644
--- a/src/exdf/data_reduction/method.py
+++ b/src/exdf/data_reduction/method.py
@@ -8,6 +8,7 @@ from extra_data.read_machinery import select_train_ids
 
 log = getLogger('exdf.data_reduction.ReductionMethod')
 
+
 train_sel = TypeVar('train_sel')
 entry_sel = TypeVar('entry_sel')
 index_exp = TypeVar('index_exp')
diff --git a/src/exdf/write/datafile.py b/src/exdf/write/datafile.py
index cc8a9430c9338ef54af286dff38435852db27701..32fc13d06f65a77445d1b37400dcd52933ece5fd 100644
--- a/src/exdf/write/datafile.py
+++ b/src/exdf/write/datafile.py
@@ -484,6 +484,7 @@ class ControlSource(Source):
                 timestamp for the corresponding value in the RUN section.
                 The first entry for the train values is used if omitted.
                 No run key is created if exactly False.
+            attrs (dict, optional): Attributes to add to this key.
 
         Returns:
             None
@@ -614,6 +615,7 @@ class InstrumentSource(Source):
                 slashes.
             data (array_like, optional): Key data to initialize the
                 dataset to.
+            attrs (dict, optional): Attributes to add to this key.
             kwargs: Any additional keyword arguments are passed to
                 create_dataset.
@@ -645,6 +647,7 @@ class InstrumentSource(Source):
             key (str): Source key, dots are automatically replaced by
                 slashes.
             data (np.ndarray): Key data.
+            attrs (dict, optional): Attributes to add to this key.
             comp_threads (int, optional): Number of threads to use for
                 compression, 8 by default.
diff --git a/src/exdf/write/sd_writer.py b/src/exdf/write/sd_writer.py
index 0ea9e4595feede6cbf81062c141830b5dd813430..cdeae23d4ea6831272b3ca5f1c2f824f2d4215c9 100644
--- a/src/exdf/write/sd_writer.py
+++ b/src/exdf/write/sd_writer.py
@@ -430,6 +430,7 @@ def get_key_attributes(sd):
 
     return source_attrs
 
+
 def iter_index_group_keys(keys, index_group):
     for key in keys:
         if key[:key.index('.')] == index_group:
@@ -446,11 +447,11 @@ def mask_index(g, counts, masks_by_train):
 
     # Modify INDEX entry if necessary.
     if full_mask.sum() != num_entries:
         g.create_dataset(
-            f'original/first', data=get_pulse_offsets(counts))
+            'original/first', data=get_pulse_offsets(counts))
         g.create_dataset(
-            f'original/count', data=counts)
+            'original/count', data=counts)
         g.create_dataset(
-            f'original/position',
+            'original/position',
             data=np.concatenate([np.flatnonzero(mask) for mask in masks_by_train]))
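
For readers of this patch, the practical effect of the newly documented `attrs` argument is that per-key attributes can be attached to the HDF5 datasets written by the ControlSource and InstrumentSource helpers. The sketch below is illustrative only and is not part of the diff: the create_key / create_compressed_key method names and their other keyword arguments are assumptions inferred from the docstring hunks above, not verified signatures from this repository.

# Hedged usage sketch: passing `attrs` when writing keys. Method and
# argument names are assumptions based on the docstrings changed above.
import numpy as np


def write_example_keys(control_src, instr_src, num_trains=100):
    """Write one CONTROL key and two INSTRUMENT keys with HDF5 attributes.

    `control_src` and `instr_src` stand for ControlSource and
    InstrumentSource objects obtained from an open exdf DataFile.
    """
    # CONTROL key: per-train values; `attrs` becomes attributes on the key.
    control_src.create_key(
        'actualPosition.value',
        values=np.linspace(0.0, 1.0, num=num_trains),
        attrs={'unitSymbol': 'mm'})

    # INSTRUMENT keys: both the plain and compressed variants accept `attrs`.
    instr_src.create_key(
        'data.adc',
        data=np.zeros((num_trains, 512), dtype=np.uint16),
        attrs={'dims': ['trainId', 'sample']})
    instr_src.create_compressed_key(
        'data.image',
        data=np.zeros((num_trains, 128, 128), dtype=np.float32),
        comp_threads=8,
        attrs={'unit': 'ADU'})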