[AGIPD][TESTs] test_agipdlib: AGIPDCtrl and get_bias_voltage for AGIPD1M and AGIPD500K

Merged Karim Ahmed requested to merge test/test_agipdlib into master
All threads resolved!
1 file  +26 −24
@@ -40,7 +40,7 @@ def sequence_trains(train_ids, trains_per_sequence=256):
            per sequence, 256 by default.

    Yields:
-       (int, array_like) Current sequence ID, train mask.
+       (int, slice) Current sequence ID, train mask.
    """

    num_trains = len(train_ids)
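The changed Yields line documents that sequence_trains now yields a slice rather than an index array. A minimal sketch of that contract, not the project's implementation (sequence_trains_sketch and the example train IDs are illustrative):

import numpy as np

def sequence_trains_sketch(train_ids, trains_per_sequence=256):
    # Yield (sequence ID, slice) pairs chunking train_ids into sequences.
    num_trains = len(train_ids)
    for seq_id, start in enumerate(range(0, num_trains, trains_per_sequence)):
        yield seq_id, slice(start, min(start + trains_per_sequence, num_trains))

train_ids = np.arange(10000, 10600)
for seq_id, train_mask in sequence_trains_sketch(train_ids):
    # A slice indexes directly into any per-train array.
    print(seq_id, len(train_ids[train_mask]))  # 0 256 / 1 256 / 2 88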
@@ -61,7 +61,7 @@ def sequence_pulses(train_ids, pulses_per_train=1, pulse_offsets=None,
            train. If scalar, it is assumed to be constant for all
            trains. If omitted, it is 1 by default.
        pulse_offsets (array_like, optional): Offsets for the first
-           pulse in each train, computed from pulses_per_train of
+           pulse in each train, computed from pulses_per_train if
            omitted.
        trains_per_sequence (int, optional): Number of trains
            per sequence, 256 by default.
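The corrected docstring says pulse_offsets is computed from pulses_per_train if omitted. The natural computation is an exclusive cumulative sum; a sketch with illustrative pulse counts:

import numpy as np

pulses_per_train = np.array([64, 64, 32, 64])  # illustrative pulse counts
# Exclusive cumulative sum: index of the first pulse of each train.
pulse_offsets = np.concatenate([[0], np.cumsum(pulses_per_train)[:-1]])
print(pulse_offsets)  # [  0  64 128 160]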
@@ -156,7 +156,8 @@ class DataFile(h5py.File):
        return self

-   def create_index(self, train_ids, timestamp=None, flag=None, origin=None):
+   def create_index(self, train_ids, timestamps=None, flags=None,
+                    origins=None):
        """Create global INDEX datasets.

        These datasets are agnostic of any source and describe the
@@ -166,9 +167,9 @@ class DataFile(h5py.File):
            train_ids (array_like): Train IDs contained in this file.
            timestamps (array_like, optional): Timestamp of each train,
                0 if omitted.
-           flag (array_like, optional): Whether the time server is the
+           flags (array_like, optional): Whether the time server is the
                initial origin of each train, 1 if omitted.
-           origin (array_like, optional): Which source is the initial
+           origins (array_like, optional): Which source is the initial
                origin of each train, -1 (time server) if omitted.

        Returns:
@@ -177,26 +178,27 @@ class DataFile(h5py.File):
        self.create_dataset('INDEX/trainId', data=train_ids, dtype=np.uint64)

-       if timestamp is None:
-           timestamp = np.zeros_like(train_ids, dtype=np.uint64)
-       elif len(timestamp) != len(train_ids):
-           raise ValueError('timestamp and train_ids must be same length')
+       if timestamps is None:
+           timestamps = np.zeros_like(train_ids, dtype=np.uint64)
+       elif len(timestamps) != len(train_ids):
+           raise ValueError('timestamps and train_ids must be same length')

-       self.create_dataset('INDEX/timestamp', data=timestamp, dtype=np.uint64)
+       self.create_dataset('INDEX/timestamp', data=timestamps,
+                           dtype=np.uint64)

-       if flag is None:
-           flag = np.ones_like(train_ids, dtype=np.int32)
-       elif len(flag) != len(train_ids):
-           raise ValueError('flag and train_ids must be same length')
+       if flags is None:
+           flags = np.ones_like(train_ids, dtype=np.int32)
+       elif len(flags) != len(train_ids):
+           raise ValueError('flags and train_ids must be same length')

-       self.create_dataset('INDEX/flag', data=flag, dtype=np.int32)
+       self.create_dataset('INDEX/flag', data=flags, dtype=np.int32)

-       if origin is None:
-           origin = np.full_like(train_ids, -1, dtype=np.int32)
-       elif len(origin) != len(train_ids):
-           raise ValueError('origin and train_ids must be same length')
+       if origins is None:
+           origins = np.full_like(train_ids, -1, dtype=np.int32)
+       elif len(origins) != len(train_ids):
+           raise ValueError('origins and train_ids must be same length')

-       self.create_dataset('INDEX/origin', data=origin, dtype=np.int32)
+       self.create_dataset('INDEX/origin', data=origins, dtype=np.int32)

    def create_control_source(self, source):
        """Create group for a control source ("slow data").
@@ -239,8 +241,8 @@ class DataFile(h5py.File):
    def create_metadata(self, like=None, *,
                        creation_date=None, update_date=None, proposal=0,
                        run=None, sequence=None, daq_library='1.x',
-                       karabo_framework='2.x', control_sources=[],
-                       instrument_sources=[]):
+                       karabo_framework='2.x', control_sources=(),
+                       instrument_sources=()):
        """Create METADATA datasets.

        Args:
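Replacing the [] defaults with () avoids Python's shared mutable default pitfall: a default list is created once at function definition time and reused across calls, so in-place mutation leaks between calls. A standalone illustration (the function names are mine, not the project's):

def bad(items=[]):      # one list object shared by every call
    items.append(1)
    return items

def good(items=()):     # immutable default; copy before modifying
    return list(items) + [1]

print(bad())   # [1]
print(bad())   # [1, 1] -- state leaked from the first call
print(good())  # [1]
print(good())  # [1]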
@@ -392,7 +394,6 @@ class ControlSource(h5py.Group):
        self.create_dataset(f'{key}/value', data=values)
        self.create_dataset(f'{key}/timestamp', data=timestamps)
-       self.__nonempty_index = True

        if run_entry is None:
            run_entry = (values[0], timestamps[0])
@@ -441,11 +442,12 @@ class ControlSource(h5py.Group):
        self.__run_group.create_dataset(
            f'{key}/value', data=value, shape=shape, dtype=dtype)
        self.__run_group.create_dataset(
-           f'{key}/timestamp', data=value, shape=shape, dtype=np.uint64)
+           f'{key}/timestamp', data=timestamp, shape=shape, dtype=np.uint64)

    def create_index(self, num_trains):
        """Create source-specific INDEX datasets.

        Depending on whether this source has train-varying data or not,
        different count/first datasets are written.
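For reference, in the EuXFEL HDF5 format the per-source INDEX count/first datasets map each train to its rows of data: train i owns count[i] entries starting at row first[i]. A sketch of the usual relationship between the two, assuming one entry per train as is typical for a control source:

import numpy as np

num_trains = 5
count = np.ones(num_trains, dtype=np.uint64)   # one entry per train
# first[i] is the row where train i's data starts: exclusive cumsum of count.
first = np.concatenate([[0], np.cumsum(count)[:-1]]).astype(np.uint64)
print(count)  # [1 1 1 1 1]
print(first)  # [0 1 2 3 4]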