diff --git a/setup.py b/setup.py index 2f3602f6644feb2eb13f11b27db89ba4f4b30247..0f50a792a95a3f929b5767bf8245701d7e6a6b82 100644 --- a/setup.py +++ b/setup.py @@ -51,10 +51,11 @@ setup( ], "calng.correction_addon": [ "IntegratedIntensity = calng.correction_addons.integrated_intensity:IntegratedIntensity", # noqa - "LitPixelCounter = calng.correction_addons.litpixel_counter:LitPixelCounter [agipd]", # noqa + "LitPixelCounter = calng.correction_addons.litpixel_counter:LitPixelCounter", # noqa "Peakfinder9 = calng.correction_addons.peakfinder9:Peakfinder9", # noqa "RandomFrames = calng.correction_addons.random_frames:RandomFrames", "SaturationMonitor = calng.correction_addons.saturation_monitor:SaturationMonitor",# noqa + "Autocorrelation = calng.correction_addons.autocorrelation:Autocorrelation", # noqa ], "calng.arbiter_kernel": [ "Assign = calng.arbiter_kernels.base_kernel:Assign", diff --git a/src/calng/CalibrationManager.py b/src/calng/CalibrationManager.py index 93d4347aa48f0258a4c75b080f1bd2b15082d645..f0a6d192f7dd6473f5d8fa400cd269c712295209 100644 --- a/src/calng/CalibrationManager.py +++ b/src/calng/CalibrationManager.py @@ -24,7 +24,8 @@ from karabo.middlelayer import ( KaraboError, Device, DeviceClientBase, Descriptor, Hash, Configurable, Slot, Node, Type, Schema, ProxyFactory, AccessMode, AccessLevel, Assignment, DaqPolicy, State, Unit, - UInt16, UInt32, Bool, Float, Double, String, VectorString, VectorHash, + Int16, UInt16, UInt32, Bool, Float, Double, String, + VectorString, VectorHash, background, call, callNoWait, setNoWait, sleep, instantiate, slot, coslot, get_property, getDevice, getSchema, getTopology, getConfiguration, getConfigurationFromPast, getConfigurationFromName, getInstanceInfo, @@ -123,6 +124,14 @@ class ModuleRow(Configurable): defaultValue='', accessMode=AccessMode.RECONFIGURABLE) + gpuIndex = Int16( + displayedName='GPU to use', + defaultValue=-1, + description='For GPU correction devices on nodes with multiple GPUs, ' + 'select which GPU (0-indexed) this device should use. Leave at -1 for ' + 'automatic distribution over available GPUs', + accessMode=AccessMode.RECONFIGURABLE) + class ModuleGroupRow(Configurable): group = UInt32( @@ -182,6 +191,11 @@ class DeviceServerRow(Configurable): defaultValue='http://', accessMode=AccessMode.RECONFIGURABLE) + numGpus = UInt16( + displayedName='Number of GPUs', + defaultValue=1, + accessMode=AccessMode.RECONFIGURABLE) + class WebserverApiNode(Configurable): statePollInterval = Double( @@ -1087,7 +1101,7 @@ class CalibrationManager(DeviceClientBase, Device): return True # Mapping of servers to device servers. 
- self._server_hosts = {server: host for server, host + self._server_hosts = {server: host for server, host, _ in self.deviceServers.value} # Mapping of "Karabo names" (with capitalization and slashes) @@ -1314,10 +1328,21 @@ class CalibrationManager(DeviceClientBase, Device): modules_by_group = defaultdict(list) correct_device_id_by_module = {} input_source_by_module = {} + gpu_load_by_server = {server: [0] * num_gpus for server, _, num_gpus + in self.deviceServers.value} awaitables = [] for index, row in enumerate(self.modules.value): - vname, group, aggregator, input_channel, input_source = row + vname, group, aggregator, input_channel, input_source, \ + gpu_index = row + server = server_by_group[group] + gpu_loads = gpu_load_by_server[server] + + if gpu_index < 0: + # crude load balancing + gpu_index = min(enumerate(gpu_loads), + key=lambda pair: pair[1])[0] + gpu_loads[gpu_index] += 1 modules_by_group[group].append(vname) device_id = self._device_id_templates['correction'].format( @@ -1339,11 +1364,13 @@ class CalibrationManager(DeviceClientBase, Device): input_source_by_module[vname] = input_source config = Hash( + 'managerDevice', self.deviceId.value, 'constantParameters.detectorName', detector_id, 'constantParameters.karaboDa', aggregator, 'dataInput.connectedOutputChannels', [input_channel], 'fastSources', [input_source], - 'geometryDevice', self.geometryDevice.value + 'geometryDevice', self.geometryDevice.value, + 'gpuIndex', gpu_index ) # Add managed keys. @@ -1354,7 +1381,7 @@ class CalibrationManager(DeviceClientBase, Device): config[key] = value.value awaitables.append(self._instantiate_device( - server_by_group[group], + server, self._class_ids['correction'], device_id, config @@ -1378,7 +1405,8 @@ class CalibrationManager(DeviceClientBase, Device): correct_device_id_by_module[vname]) ) for vname in modules_by_group[group]] - config = Hash('sources', sources) + config = Hash('managerDevice', self.deviceId.value, + 'sources', sources) if with_bridge: config['zmqConfig'] = [ @@ -1399,7 +1427,8 @@ class CalibrationManager(DeviceClientBase, Device): 'source', f'{device_id}:preview.{preview_name}.output') for (_, device_id) in correct_device_id_by_module.items()] - config = Hash('sources', sources, + config = Hash('managerDevice', self.deviceId.value, + 'sources', sources, 'geometryDevice', self.geometryDevice.value) for remote_key, local_key, _ in self._managed_assembler_keys: diff --git a/src/calng/DetectorAssembler.py b/src/calng/DetectorAssembler.py index 53fb2b62a2bf82e16d2595c7a5c09b4ce49877e4..36e92e55d0ae4505fcd3170ad6e7fe4859f2dd13 100644 --- a/src/calng/DetectorAssembler.py +++ b/src/calng/DetectorAssembler.py @@ -85,6 +85,13 @@ class DetectorAssembler(TrainMatcher.TrainMatcher): .setNewDefaultValue(["overview", "trainMatcherScene"]) .commit(), + STRING_ELEMENT(expected) + .key("managerDevice") + .displayedName("Manager device") + .assignmentInternal() + .defaultValue("") + .commit(), + DOUBLE_ELEMENT(expected) .key("timeOfFlight") .displayedName("Time of flight") @@ -227,7 +234,6 @@ class DetectorAssembler(TrainMatcher.TrainMatcher): geom_utils.subscribe_to_geometry_bound( self, self.get("geometryDevice"), _on_geometry ) - print("done setting up thing", self._geometry) if self._geometry is None: # initial get failed self.set("geometryState", "ERROR") diff --git a/src/calng/FrameSelectionArbiter.py b/src/calng/FrameSelectionArbiter.py index c3ddbd7f3f8f317845b03cd5822e421e22271e2c..53ef68e4571e7208efee5b919932fc7b2d63bb91 100644 --- a/src/calng/FrameSelectionArbiter.py 
+++ b/src/calng/FrameSelectionArbiter.py @@ -1,4 +1,5 @@ import enum +import re from importlib.metadata import entry_points import numpy as np @@ -31,6 +32,7 @@ from .utils import WarningContextSystem class DeviceWarning(enum.Enum): PLAN = "plan" DECISION = "decision" + REDUCTION = "reduction" def output_schema(): @@ -115,6 +117,47 @@ def selection_plan_schema(kernels): return schema +def reduction_classes_schema(): + schema = Schema() + ( + BOOL_ELEMENT(schema) + .key("enable") + .displayedName("Enable") + .assignmentOptional().defaultValue(False) + .reconfigurable() + .commit(), + + STRING_ELEMENT(schema) + .key("operation") + .displayedName("Operation") + .options("sum,count,min,max,sumsq") + .assignmentOptional().defaultValue("sum") + .reconfigurable() + .commit(), + + STRING_ELEMENT(schema) + .key("argument") + .displayedName("Key to reduce") + .assignmentOptional().defaultValue("image.data") + .reconfigurable() + .commit(), + + STRING_ELEMENT(schema) + .key("selection") + .displayedName("Selection Name") + .assignmentOptional().defaultValue("") + .reconfigurable() + .commit(), + + STRING_ELEMENT(schema).key("result") + .displayedName("Result Key") + .assignmentOptional().defaultValue("sumImage") + .reconfigurable() + .commit(), + ) + return schema + + class BaseFrameSelectionArbiter(TrainMatcher.TrainMatcher): @staticmethod def expectedParameters(expected): @@ -308,6 +351,14 @@ class AdvancedFrameSelectionArbiter(BaseFrameSelectionArbiter): .reconfigurable() .commit(), + TABLE_ELEMENT(expected).key("frameSelection.reduction") + .displayedName("Reduction Classes") + .description("") + .setColumns(reduction_classes_schema()) + .assignmentOptional().defaultValue([]) + .reconfigurable() + .commit(), + STRING_ELEMENT(expected) .key("frameSelection.decision") .displayedName("Decision") @@ -319,6 +370,13 @@ class AdvancedFrameSelectionArbiter(BaseFrameSelectionArbiter): .key("frameSelection.selections") .displayedName("Selections") .commit(), + + STRING_ELEMENT(expected) + .key("matchers") + .displayedName("Matcher pattern") + .assignmentOptional().defaultValue("") + .reconfigurable() + .commit(), ) def __init__(self, config): @@ -326,20 +384,31 @@ class AdvancedFrameSelectionArbiter(BaseFrameSelectionArbiter): self._selection_steps = {} # name -> (kernel class, preselection expression) self._selection_kernels = {} # name -> kernel instance self._decision_expr = BinaryExpression(None) + self._reduction_classes = {} # result key -> (operation, source key, name) + self._serialized_reduction_classes = [] + self._matchers = set() def initialization(self): super().initialization() + self._set_matcher_pattern(self["matchers"]) + self._validate_plan_and_update_schema(self["frameSelection.plan"]) self._initialize_kernels() self._configure_decision(self["frameSelection.decision"]) geom_utils.subscribe_to_geometry_bound( self, self.get("geometryDevice"), on_geometry=self._on_geometry ) + self._validate_reduction(self["frameSelection.reduction"]) if self["state"] != State.ERROR: self.start() # Auto-start this type of matcher. 
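Note on the reduction hand-off introduced in this file: the arbiter serializes each enabled row of the `frameSelection.reduction` table into a colon-separated string and pushes the list to downstream matchers through their `receive_reduction_classes` slot (see `_validate_reduction` below and `FrameselectionFriend.receive_reduction_classes` later in this diff). A minimal sketch of that round trip in plain Python, with made-up values, assuming the `selection:operation:argument:result` field order used in this patch:

```python
# Arbiter side: result key -> (operation, source key, selection name); values invented.
reduction_classes = {
    "sumImage": ("sum", "image.data", "litframes"),
}
serialized = [
    f"{name}:{op}:{arg}:{result}"
    for result, (op, arg, name) in reduction_classes.items()
]
assert serialized == ["litframes:sum:image.data:sumImage"]

# Matcher side: rebuild the mapping from the received strings.
received = {}
for encoded in serialized:
    name, op, arg, result = encoded.split(":")
    received[result] = (op, arg, name)
assert received == reduction_classes
```

Sending an empty list, as done in `preDestruction` and for matchers that drop out of the pattern in `on_connect`, clears the reduction table on the matcher side.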
+ def preDestruction(self): + super().preDestruction() + for matcher_id in self._matchers: + self.call(matcher_id, "receive_reduction_classes", []) + def start(self): super().start() self.warning_context.on_success["state"] = "ACTIVE" @@ -395,6 +464,30 @@ class AdvancedFrameSelectionArbiter(BaseFrameSelectionArbiter): self.info["trainId"] = train_id self.rate_out.update() + def on_connect(self, connections): + new_matchers = set() + for remote in connections: + matcher_id = remote["remoteId"].partition(':')[0] + if not self._matcher_pattern.match(matcher_id): + continue + if matcher_id not in self._matchers: + # send reduction configuration to matcher + self.call(matcher_id, "receive_reduction_classes", + self._serialized_reduction_classes) + new_matchers.add(matcher_id) + for matcher_id in self._matchers - new_matchers: + # maybe check if the matcher is still alive? + self.call(matcher_id, "receive_reduction_classes", []) + self._matchers = new_matchers + + def _set_matcher_pattern(self, matchers): + if not matchers: + matchers = self.getInstanceId().rpartition('/')[0] + r"/MATCH_G\d" + self.set("matchers", matchers) + self._matcher_pattern = re.compile(matchers) + connections = self["output.connections"] + self.on_connect(connections) + def _process_selection_steps(self, sources, train_id, num_frames, out_hash): res = {} for selection_name, kernel in self._selection_kernels.items(): @@ -423,6 +516,7 @@ class AdvancedFrameSelectionArbiter(BaseFrameSelectionArbiter): except Exception as ex: warn(f"Kernel for {prefix} failed: {ex}") res[selection_name] = np.ones(num_frames, dtype=bool) + out_hash[f"{selection_name}.mask"] = list(map(bool, res[selection_name])) return res def _configure_decision(self, decision_string): @@ -501,6 +595,17 @@ class AdvancedFrameSelectionArbiter(BaseFrameSelectionArbiter): kernel_class.extend_device_schema( schema_update, kernel_prefix ) + ( + NODE_ELEMENT(output_schema_update) + .key(selection_name) + .commit(), + + VECTOR_BOOL_ELEMENT(output_schema_update) + .key(f"{selection_name}.mask") + .assignmentOptional() + .defaultValue([]) + .commit(), + ) kernel_class.extend_output_schema(output_schema_update, selection_name) new_selection_steps[selection_name] = (kernel_class, preselection) @@ -512,6 +617,42 @@ class AdvancedFrameSelectionArbiter(BaseFrameSelectionArbiter): ) self._selection_steps = new_selection_steps self.updateSchema(schema_update) + self.output = self._ss.getOutputChannel('output') + self.output.registerShowConnectionsHandler(self.on_connect) + + def _validate_reduction(self, classes): + new_reduction_classes = {} + with self.warning_context("state", DeviceWarning.REDUCTION) as warn: + for redu_cls in classes: + if not redu_cls["enable"]: + continue + + op = redu_cls["operation"] + arg = redu_cls["argument"].strip() + if not all(n.isidentifier() for n in arg.split('.')): + warn(f"Invalid source key '{arg}'") + continue + + name = redu_cls["selection"].strip() + if name not in self._selection_steps: + warn(f"Name '{name}' is not found among selection steps") + continue + + result = redu_cls["result"].strip() + if not result.isidentifier(): + warn(f"Invalid result key '{result}'") + continue + + new_reduction_classes[result] = (op, arg, name) + self._reduction_classes = new_reduction_classes + self._serialized_reduction_classes = [ + f"{name}:{op}:{arg}:{result}" + for result, (op, arg, name) in self._reduction_classes.items() + ] + # notify all connected matchers + for matcher_id in self._matchers: + self.call(matcher_id, 
"receive_reduction_classes", + self._serialized_reduction_classes) def _initialize_kernels(self): # instantiate kernels for selections @@ -539,12 +680,18 @@ class AdvancedFrameSelectionArbiter(BaseFrameSelectionArbiter): if conf.has("frameSelection.decision"): self._configure_decision(conf["frameSelection.decision"]) + if conf.has("frameSelection.reduction"): + self._validate_reduction(conf["frameSelection.reduction"]) + if conf.has("frameSelection.selections"): for name, kernel in self._selection_kernels.items(): kernel_prefix = f"frameSelection.selections.{name}" if conf.has(kernel_prefix): kernel.reconfigure(conf[kernel_prefix]) + if conf.has("matchers"): + self._set_matcher_pattern(conf["matchers"]) + def _on_geometry(self, geometry): for kernel in self._selection_kernels.values(): kernel.on_new_geometry(geometry) diff --git a/src/calng/ShmemTrainMatcher.py b/src/calng/ShmemTrainMatcher.py index d99df8d0d8beb7791bb297678ce96694eeba0d6c..6c8c6e0b6c073bc406baca38b27436226f00bee5 100644 --- a/src/calng/ShmemTrainMatcher.py +++ b/src/calng/ShmemTrainMatcher.py @@ -7,6 +7,7 @@ from karabo.bound import ( DOUBLE_ELEMENT, KARABO_CLASSINFO, OVERWRITE_ELEMENT, + STRING_ELEMENT, UINT32_ELEMENT, ChannelMetaData, Hash, @@ -26,6 +27,13 @@ class ShmemTrainMatcher(TrainMatcher.TrainMatcher): @staticmethod def expectedParameters(expected): ( + STRING_ELEMENT(expected) + .key("managerDevice") + .displayedName("Manager device") + .assignmentInternal() + .defaultValue("") + .commit(), + # order is important for stacking, disable sorting OVERWRITE_ELEMENT(expected) .key("sortSources") @@ -88,7 +96,7 @@ class ShmemTrainMatcher(TrainMatcher.TrainMatcher): self._stacking_friend = StackingFriend( self, self.get("sources"), self.get("merge") ) - self._frameselection_friend = FrameselectionFriend(self.get("frameSelector")) + self._frameselection_friend = FrameselectionFriend(self, self.get("frameSelector")) self._thread_pool = concurrent.futures.ThreadPoolExecutor( max_workers=self.get("processingThreads") ) @@ -118,19 +126,32 @@ class ShmemTrainMatcher(TrainMatcher.TrainMatcher): def on_matched_data(self, train_id, sources): ts_start = default_timer() + + # shared memory handlers + for source, (data_hash, timestamp) in sources.items(): + self._shmem_handler.dereference_shmem_handles(data_hash) + + # reduction operations + reduction_operations = ( + self._frameselection_friend.resolve_reduction_classes(sources)) + concurrent.futures.wait([ + self._thread_pool.submit(op) for op in reduction_operations]) + + # frame slicing frame_selection_mask = self._frameselection_friend.get_mask(sources) concurrent.futures.wait( [ self._thread_pool.submit( - self._handle_source, + self._frameselection_friend.apply_mask, source, data, - timestamp, frame_selection_mask, ) for source, (data, timestamp) in sources.items() ] ) + + # stacking self._stacking_friend.process(sources, self._thread_pool) # karabo output @@ -151,13 +172,3 @@ class ShmemTrainMatcher(TrainMatcher.TrainMatcher): self.info["sent"] += 1 self.info["trainId"] = train_id self.rate_out.update() - - def _handle_source( - self, - source, - data_hash, - timestamp, - frame_selection_mask, - ): - self._shmem_handler.dereference_shmem_handles(data_hash) - self._frameselection_friend.apply_mask(source, data_hash, frame_selection_mask) diff --git a/src/calng/base_calcat.py b/src/calng/base_calcat.py index 8e1ec49d6120c335a4b8628eac569f9c7d5396d7..195371db259ba1ca9e191d9109593aab65fde81c 100644 --- a/src/calng/base_calcat.py +++ b/src/calng/base_calcat.py @@ -356,6 +356,7 
@@ class BaseCalcatFriend: .description( "Number of memory cells / frames per train. Relevant for burst mode." ) + .unit(Unit.COUNT) .assignmentOptional() .defaultValue(1) .reconfigurable() @@ -364,6 +365,7 @@ class BaseCalcatFriend: UINT32_ELEMENT(schema) .key("constantParameters.pixelsX") .displayedName("Pixels X") + .unit(Unit.COUNT) .assignmentOptional() .defaultValue(512) .commit(), @@ -371,6 +373,7 @@ class BaseCalcatFriend: UINT32_ELEMENT(schema) .key("constantParameters.pixelsY") .displayedName("Pixels Y") + .unit(Unit.COUNT) .assignmentOptional() .defaultValue(128) .commit(), @@ -380,6 +383,7 @@ class BaseCalcatFriend: .tags("managed") .displayedName("Bias voltage") .description("Sensor bias voltage") + .unit(Unit.VOLT) .assignmentOptional() .defaultValue(300) .reconfigurable() diff --git a/src/calng/base_correction.py b/src/calng/base_correction.py index 459ef83f37da4c27d289b9d0b71ff53f47252204..8fa0c4830fd8fb147174ac664b6f8327796cd918 100644 --- a/src/calng/base_correction.py +++ b/src/calng/base_correction.py @@ -26,6 +26,7 @@ from karabo.bound import ( OVERWRITE_ELEMENT, SLOT_ELEMENT, STRING_ELEMENT, + INT32_ELEMENT, UINT32_ELEMENT, UINT64_ELEMENT, VECTOR_STRING_ELEMENT, @@ -77,6 +78,13 @@ class BaseCorrection(PythonDevice): .setNewDefaultValue(State.INIT) .commit(), + STRING_ELEMENT(expected) + .key("managerDevice") + .displayedName("Manager device") + .assignmentInternal() + .defaultValue("") + .commit(), + INPUT_CHANNEL(expected) .key("dataInput") .commit(), @@ -159,6 +167,19 @@ class BaseCorrection(PythonDevice): .assignmentOptional() .defaultValue("") .commit(), + + INT32_ELEMENT(expected) + .key("gpuIndex") + .displayedName("GPU index to use") + .description( + "In case this device uses a GPU kernel runner and the node it runs on " + "has multiple GPUs available, use this index to control which of the " + "GPUs the device will run on. This is intended for the manager to load " + "balance multiple correction devices sharing big nodes." 
+ ) + .assignmentOptional() + .defaultValue(0) + .commit(), ) ( @@ -545,9 +566,7 @@ class BaseCorrection(PythonDevice): addon_prefix = f"addons.{addon_class.__name__}" if not self.get(f"{addon_prefix}.enable"): continue - addon = addon_class(self._parameters[addon_prefix]) - addon._device = self - addon._prefix = addon_prefix + addon = addon_class(self, addon_prefix, self._parameters[addon_prefix]) self._enabled_addons.append(addon) if ( (self.get("useShmemHandles") != self._use_shmem_handles) @@ -886,6 +905,19 @@ class BaseCorrection(PythonDevice): image_data, cell_table, *additional_data ) data_hash["corrections"] = corrections + + # write previews first so addons cannot mess with them + with self.warning_context( + "processingState", WarningLampType.PREVIEW_SETTINGS + ) as warn: + self.preview_friend.write_outputs( + *previews, + timestamp=timestamp, + cell_table=cell_table, + pulse_table=pulse_table, + warn_fun=warn, + ) + for addon in self._enabled_addons: addon.post_correction( timestamp.getTrainId(), @@ -896,17 +928,6 @@ class BaseCorrection(PythonDevice): ) self.kernel_runner.reshape(processed_buffer, out=buffer_array) - with self.warning_context( - "processingState", WarningLampType.PREVIEW_SETTINGS - ) as warn: - self.preview_friend.write_outputs( - *previews, - timestamp=timestamp, - cell_table=cell_table, - pulse_table=pulse_table, - warn_fun=warn, - ) - for addon in self._enabled_addons: addon.post_reshape( timestamp.getTrainId(), buffer_array, cell_table, pulse_table, data_hash @@ -914,7 +935,8 @@ class BaseCorrection(PythonDevice): if self.unsafe_get("useShmemHandles"): data_hash.set(self._image_data_path, buffer_handle) - data_hash.set("calngShmemPaths", [self._image_data_path]) + shmem_paths = data_hash.get("calngShmemPaths", default=[]) + data_hash.set("calngShmemPaths", shmem_paths + [self._image_data_path]) else: data_hash.set(self._image_data_path, buffer_array) data_hash.set("calngShmemPaths", []) diff --git a/src/calng/base_kernel_runner.py b/src/calng/base_kernel_runner.py index 346308833bb79a83f9e1c8107d61e5b5a34dd9b2..55c85ccb7c2755666906cf3ce151656dd3034b9e 100644 --- a/src/calng/base_kernel_runner.py +++ b/src/calng/base_kernel_runner.py @@ -19,6 +19,7 @@ import jinja2 class BaseKernelRunner: + runner_type = None # subclass sets CPU / GPU _xp = None # subclass sets numpy or cupy num_pixels_ss = None # subclass must set num_pixels_fs = None # subclass must set @@ -129,6 +130,16 @@ class BaseKernelRunner: def __init__(self, device): self._device = device + if self.runner_type is KernelRunnerTypes.GPU: + import os + os.environ["CUDA_VISIBLE_DEVICES"] = str( + self._device.unsafe_get("gpuIndex") + ) + import cupy + self._xp = cupy + else: + import numpy + self._xp = numpy # for now, we depend on multiple keys from device hash, so get multiple nodes config = subset_of_hash( self._device._parameters, @@ -137,7 +148,6 @@ class BaseKernelRunner: "constantParameters", ) self._constant_memory_cells = config["constantParameters.memoryCells"] - self._pre_init() # note: does not handle one constant enabling multiple correction steps # (do we need to generalize "decide if this correction step is available"?) self._constant_to_correction_names = {} @@ -149,17 +159,6 @@ class BaseKernelRunner: self.reconfigure(config) self._post_init() - def _pre_init(self): - """Hook used to set up things which need to be in place before __init__ is - called. 
Note that __init__ will do reconfigure with full configuration, which - means creating constant buffers and such (__init__ already takes care to set - _constant_memory_cells so this can be used in _setup_constant_buffers). - - - See also _setup_constant_buffers - """ - pass - def _post_init(self): # can be used to set GPU / CPU-specific buffers in subclasses # without overriding __init__ diff --git a/src/calng/conditions/AgipdCondition.py b/src/calng/conditions/AgipdCondition.py index 4bf316033bc56615375b458cebae01bdea1fa32a..acd42e698b3cc85b3b07f9e1f5f963caa9aa5109 100644 --- a/src/calng/conditions/AgipdCondition.py +++ b/src/calng/conditions/AgipdCondition.py @@ -1,6 +1,14 @@ from karabo.middlelayer import AccessMode, Assignment, String from .. import base_condition -from ..corrections.AgipdCorrection import GainModes +from ..corrections.AgipdCorrection import GainMode, GainSetting + + +def gain_setting_translator(setting): + return GainSetting(setting).format_helpful() + + +def gain_mode_translator(mode): + return GainMode(mode).format_helpful() class AgipdCondition(base_condition.ConditionBase): @@ -21,12 +29,8 @@ class AgipdCondition(base_condition.ConditionBase): ("bunchStructure.repetitionRate", "acquisitionRate", None), # TODO: check if appropriate (agipdlib looks at image.cellId) ("bunchStructure.nPulses", "memoryCells", None), - ("gain", "gainSetting", None), - ( - "gainModeIndex", - "gainMode", - lambda i: GainModes(i).name, - ), + ("gain", "gainSetting", gain_setting_translator), + ("gainModeIndex", "gainMode", gain_mode_translator), ("integrationTime", "integrationTime", None), ], # observed voltages in calcat: diff --git a/src/calng/conditions/JungfrauCondition.py b/src/calng/conditions/JungfrauCondition.py index 30963793023deff7b8fa0fd01ada0f221992d36d..9c4e9266142abc184e957a7409518dfa4a280c6f 100644 --- a/src/calng/conditions/JungfrauCondition.py +++ b/src/calng/conditions/JungfrauCondition.py @@ -2,23 +2,23 @@ import operator from karabo.middlelayer import AccessMode, Assignment, String from .. 
import base_condition -from ..corrections.JungfrauCorrection import GainModes, GainSettings +from ..corrections.JungfrauCorrection import GainMode, GainSetting def gain_mode_translator(gain_mode_string): if gain_mode_string in {"dynamic", "forceswitchg1", "forceswitchg2"}: - return GainModes.ADAPTIVE_GAIN.name + return GainMode.ADAPTIVE_GAIN.format_helpful() elif gain_mode_string in {"fixg1", "fixg2"}: - return GainModes.FIXED_GAIN.name + return GainMode.FIXED_GAIN.format_helpful() else: raise ValueError(f"Unknown gain mode {gain_mode_string}") def gain_setting_translator(setting): if setting == "gain0": - return GainSettings.LOW_CDS.name + return GainSetting.LOW_CDS.format_helpful() elif setting == "highgain0": - return GainSettings.HIGH_CDS.name + return GainSetting.HIGH_CDS.format_helpful() else: raise ValueError(f"Unknown gain setting {setting}") @@ -41,6 +41,7 @@ class JungfrauCondition(base_condition.ConditionBase): "integrationTime", lambda n: n * 1e6, ), + ("exposureTimeout", "exposureTimeout", None), ("highVoltage", "biasVoltage", operator.itemgetter(0)), ("settings", "gainSetting", gain_setting_translator), ("gainMode", "gainMode", gain_mode_translator), diff --git a/src/calng/correction_addons/autocorrelation.py b/src/calng/correction_addons/autocorrelation.py new file mode 100644 index 0000000000000000000000000000000000000000..08abb8b910db607c7e8ea52a84d7eba54fd155a7 --- /dev/null +++ b/src/calng/correction_addons/autocorrelation.py @@ -0,0 +1,252 @@ +import h5py +import numpy as np +from calngUtils import shmem_utils +from karabo.bound import ( + DOUBLE_ELEMENT, INT32_ELEMENT, NDARRAY_ELEMENT, STRING_ELEMENT, + VECTOR_STRING_ELEMENT) + +from .. import base_kernel_runner +from .base_addon import BaseCorrectionAddon + + +def block(b): + return np.concatenate([np.concatenate(row, axis=-1) for row in b], axis=-2) + + +def autocorr2_fft(f, mode='full', backend='scipy'): + """Computes 2D autocorrelation function for real input. + + This is equivalent to + fftconvolve(f, np.flip(f, axis=(-2, -1)), mode) + """ + nx, ny = f.shape[-2:] + + from scipy.fft import next_fast_len + fshape = [next_fast_len(sz, True) for sz in (2 * nx - 1, 2 * ny - 1)] + r = fft.rfft2(f, fshape) + r = fft.irfft2((r * r.conj()).real, fshape) + + if mode == "same": + ux = (nx + 1) // 2 + lx = (1 - nx) // 2 + uy = (ny + 1) // 2 + ly = (1 - ny) // 2 + elif mode == "full": + ux = nx + lx = (1 - nx) + uy = ny + ly = (1 - ny) + else: + raise ValueError("acceptable mode flags are 'same', or 'full'") + + a11 = r[..., :ux, :uy] + a12 = r[..., :ux, ly:] + a21 = r[..., lx:, :uy] + a22 = r[..., lx:, ly:] + + return block([[a22, a21], [a12, a11]]) + + +def autocorr2symm_fft(f, backend='scipy'): + """Computes 2D autocorrelation function for real input and + symmetrical boundary conditions. + """ + r = fft.dctn(f, 3, axes=(-2, -1)) + r = fft.idctn(r * r, 3, axes=(-2, -1)) + return r + + +def autocorr2wrap_fft(f, backend='scipy'): + """Computes 2D autocorrelation function for real input and + circular boundary conditions. 
+ """ + s = f.shape[-2:] + r = fft.rfft2(f, s) + r = fft.irfft2((r * r.conj()).real, s) + r = fft.fftshift(r) + return r + + +AUTOCORR_FUN = { + "symm": autocorr2symm_fft, + "wrap": autocorr2wrap_fft, + "fill": lambda f: autocorr2_fft(f, "same"), +} + +# AGIPD specific, we need to know the detector layout +RESULT_SHAPE = { + "symm": np.s_[:, :-14, :], + "wrap": np.s_[:, 7:-7, :], + "fill": np.s_[:, 7:-7, :], +} + + +class Autocorrelation(BaseCorrectionAddon): + _device = None # set by BaseCorrectionAddon.__init__ + + @staticmethod + def extend_device_schema(schema, prefix): + ( + INT32_ELEMENT(schema) + .key(f"{prefix}.moduleNumber") + .readOnly().initialValue(-1) + .commit(), + + STRING_ELEMENT(schema) + .key(f"{prefix}.boundary") + .tags("managed") + .options(",".join(AUTOCORR_FUN.keys())) + .assignmentOptional() + .defaultValue("symm") + .reconfigurable() + .commit(), + + DOUBLE_ELEMENT(schema) + .key(f"{prefix}.intensityPerPhoton") + .tags("managed") + .assignmentOptional() + .defaultValue(9.3) + .reconfigurable() + .commit(), + + DOUBLE_ELEMENT(schema) + .key(f"{prefix}.roundingThreshold") + .tags("managed") + .assignmentOptional() + .defaultValue(0.7) + .reconfigurable() + .commit(), + + VECTOR_STRING_ELEMENT(schema) + .key(f"{prefix}.maskPaths") + .tags("managed") + .assignmentOptional() + .defaultValue([]) + .reconfigurable() + .commit(), + ) + + @staticmethod + def extend_output_schema(schema): + ( + NDARRAY_ELEMENT(schema) + .key("image.autocorr") + .dtype('FLOAT') + .commit(), + ) + + def __init__(self, device, prefix, config): + super().__init__(device, prefix, config) + self._shape = None + self._shmem_buffer = None + + global cupy + import cupy + + global fft + kernel_type = base_kernel_runner.KernelRunnerTypes[ + device.unsafe_get("kernelType") + ] + if kernel_type is base_kernel_runner.KernelRunnerTypes.CPU: + import scipy.fft as fft + else: + import cupyx.scipy.fft as fft + + da = device["constantParameters.karaboDa"] + self._modno = int(da[-2:]) + device.set(f"{prefix}.moduleNumber", self._modno) + + self.reconfigure(config) + + # AGIPD specific, we need to know the detector layout + self._repeats = np.ones(512, int) + self._repeats[63:-1:64] = 2 + self._repeats[64::64] = 2 + self._repeats = self._repeats.tolist() + + def __del__(self): + del self._shmem_buffer + super().__del__() + + def post_correction(self, train_id, processed_data, cell_table, pulse_table, output_hash): + shape = processed_data.shape + if shape != self._shape: + self._update_buffer(shape) + + # conversion to photons + np.around( + processed_data / self._intensity_per_photon - (self._rounding_threshold - 0.5), + out=processed_data + ) + processed_data[processed_data < 0.0] = 0.0 + + # apply extra user mask + if self._mask is not None: + processed_data[:, self._mask] = np.nan + + # compute autocorrelation + data = processed_data.copy() + data[np.isnan(data)] = 0.0 + data = np.repeat(data, self._repeats, axis=1) + + autocorr = self._autocorr(data)[self._result_shape] + + # compute autocorrelation normalized by mask (takes too much memory) + # mask = np.isfinite(data) + # mask = np.repeat(mask, self._repeats, axis=1).astype(np.float32) + # autocorr /= self._autocorr(mask)[self._result_shape] + + buffer_handle, buffer_array = self._shmem_buffer.next_slot() + if hasattr(autocorr, 'get'): + autocorr.get(out=buffer_array[:]) + else: + buffer_array[:] = autocorr + + if self._device._use_shmem_handles: + output_hash.set("image.autocorr", buffer_handle) + shmem_paths = output_hash.get("calngShmemPaths",
default=[]) + output_hash.set("calngShmemPaths", + shmem_paths + ["image.autocorr"]) + else: + output_hash.set("image.autocorr", buffer_array) + + def reconfigure(self, changed_config): + if changed_config.has("intensityPerPhoton"): + self._intensity_per_photon = changed_config["intensityPerPhoton"] + if changed_config.has("boundary"): + self._autocorr = AUTOCORR_FUN[changed_config["boundary"]] + self._result_shape = RESULT_SHAPE[changed_config["boundary"]] + if changed_config.has("roundingThreshold"): + self._rounding_threshold = changed_config["roundingThreshold"] + if changed_config.has("maskPaths"): + self._load_mask(changed_config["maskPaths"]) + + def _update_buffer(self, shape): + self._shape = shape + + if self._shmem_buffer is None: + shmem_buffer_name = self._device.getInstanceId() + ":dataOutput/autocorrelation" + memory_budget = self._device.get("outputShmemBufferSize") * 2**30 + self._shmem_buffer = shmem_utils.ShmemCircularBuffer( + memory_budget, + shape, + np.float32, + shmem_buffer_name, + ) + if self._device._cuda_pin_buffers: + self._shmem_buffer.cuda_pin() + else: + self._shmem_buffer.change_shape(shape) + + def _load_mask(self, paths): + self._mask = None + for fn in paths: + try: + with h5py.File(fn, 'r') as f: + mask = f["entry_1/data_1/mask"][self._modno] != 0 + if self._mask is None: + self._mask = mask + else: + self._mask = self._mask | mask + except Exception as e: + self._device.log.WARN(f"Autocorrelation addon: {e}") diff --git a/src/calng/correction_addons/base_addon.py b/src/calng/correction_addons/base_addon.py index b5578c1e5166948d5bbfbad8cf5075c28c477dd7..0a063bde862cecf7f5755aba34e8b53a14019d53 100644 --- a/src/calng/correction_addons/base_addon.py +++ b/src/calng/correction_addons/base_addon.py @@ -15,9 +15,10 @@ class BaseCorrectionAddon: and add properties to it.""" pass - def __init__(self, config): + def __init__(self, device, prefix, config): """Will be given the node from extend_device_schema, no prefix needed here""" - pass + self._device = device + self._prefix = prefix def post_correction( self, train_id, processed_data, cell_table, pulse_table, output_hash diff --git a/src/calng/correction_addons/integrated_intensity.py b/src/calng/correction_addons/integrated_intensity.py index 5cfaad9ee436c64c66ecc05f43a2c76b3270b68a..188b6d4f26ac629a9a323d646aca54f120be2568 100644 --- a/src/calng/correction_addons/integrated_intensity.py +++ b/src/calng/correction_addons/integrated_intensity.py @@ -11,7 +11,8 @@ def maybe_get(a): class IntegratedIntensity(BaseCorrectionAddon): - def __init__(self, config): + def __init__(self, device, prefix, config): + super().__init__(device, prefix, config) global cupy import cupy diff --git a/src/calng/correction_addons/litpixel_counter.py b/src/calng/correction_addons/litpixel_counter.py index 7baa14fd506e2a45d2fab61a0944c85c66c05d42..8089e9c33133155fc574698ef0c14155fd7a66b5 100644 --- a/src/calng/correction_addons/litpixel_counter.py +++ b/src/calng/correction_addons/litpixel_counter.py @@ -1,20 +1,62 @@ import numpy as np - -from karabo.bound import NODE_ELEMENT, NDARRAY_ELEMENT, DOUBLE_ELEMENT +from karabo.bound import ( + DOUBLE_ELEMENT, NDARRAY_ELEMENT, NODE_ELEMENT, UINT32_ELEMENT) from .base_addon import BaseCorrectionAddon +def maybe_get(a): + # TODO: proper check for cupy + if hasattr(a, "get"): + return a.get() + return a + + +def factors(n): + if n is None: + return np.array([1]) + f = [i for i in range(2, int(np.sqrt(n)) + 1) if (n % i) == 0] + f.extend(n // i for i in reversed(f)) + return np.array(f
+ [n]) + + class LitPixelCounter(BaseCorrectionAddon): - def __init__(self, config): + def __init__(self, device, prefix, config): + super().__init__(device, prefix, config) global cupy import cupy - self._threshold = config["threshold"] + self._ss_size = device.kernel_runner.num_pixels_ss + self._ss_factors = factors(self._ss_size) + self._fs_size = device.kernel_runner.num_pixels_fs + self._fs_factors = factors(self._fs_size) + + self.reconfigure(config) + + # these properties may be changed to the factors + # of module dimensions due to protection + device.set(f"{prefix}.ssAsicSize", self._ss_asic_size) + device.set(f"{prefix}.fsAsicSize", self._fs_asic_size) def reconfigure(self, changed_config): if changed_config.has("threshold"): self._threshold = changed_config["threshold"] + if changed_config.has("ssAsicSize"): + size = changed_config["ssAsicSize"] + if size == 0: + self._ss_asic_size = self._ss_size + else: + self._ss_asic_size = int(self._ss_factors[ + np.argmin(np.abs(self._ss_factors - size))]) + changed_config["ssAsicSize"] = self._ss_asic_size + if changed_config.has("fsAsicSize"): + size = changed_config["fsAsicSize"] + if size == 0: + self._fs_asic_size = self._fs_size + else: + self._fs_asic_size = int(self._fs_factors[ + np.argmin(np.abs(self._fs_factors - size))]) + changed_config["fsAsicSize"] = self._fs_asic_size @staticmethod def extend_output_schema(schema): @@ -29,9 +71,19 @@ class LitPixelCounter(BaseCorrectionAddon): .commit(), NDARRAY_ELEMENT(schema) - .key("litpixels.unmasked") + .key("litpixels.intensity") .dtype('FLOAT') - .commit() + .commit(), + + NDARRAY_ELEMENT(schema) + .key("litpixels.unmasked") + .dtype('UINT32') + .commit(), + + NDARRAY_ELEMENT(schema) + .key("litpixels.total") + .dtype('UINT32') + .commit(), ) @staticmethod @@ -43,15 +95,46 @@ class LitPixelCounter(BaseCorrectionAddon): .assignmentOptional() .defaultValue(6.0) .reconfigurable() - .commit() + .commit(), + + UINT32_ELEMENT(schema) + .key(f"{prefix}.ssAsicSize") + .tags("managed") + .assignmentOptional() + .defaultValue(0) + .reconfigurable() + .commit(), + + UINT32_ELEMENT(schema) + .key(f"{prefix}.fsAsicSize") + .tags("managed") + .assignmentOptional() + .defaultValue(0) + .reconfigurable() + .commit(), ) def post_correction(self, tid, data, cell_table, pulse_table, output_hash): - n_cells, n_x, n_y = data.shape - per_asic_data = data.reshape(n_cells, 64, n_x // 64, 64, n_y // 64) + if np.ndim(data) == 2: + n_cells, fs_size = data.shape + axis = 1 + shape = (n_cells, + self._fs_asic_size, fs_size // self._fs_asic_size) + else: + n_cells, ss_size, fs_size = data.shape + axis = (1, 3) + shape = (n_cells, + self._ss_asic_size, ss_size // self._ss_asic_size, + self._fs_asic_size, fs_size // self._fs_asic_size) + + per_asic_data = data.reshape(shape) - lit_pixels = np.sum(per_asic_data > self._threshold, axis=(1, 3)) - unmasked_pixels = np.isfinite(per_asic_data).sum(axis=(1, 3)) + lit_pixels = np.sum(per_asic_data > self._threshold, axis=axis) + intensity = np.nansum(per_asic_data, axis=axis) + unmasked_pixels = np.isfinite(per_asic_data).sum(axis=axis) - output_hash["litpixels.count"] = lit_pixels.get() - output_hash["litpixels.unmasked"] = unmasked_pixels.get() + output_hash["litpixels.count"] = maybe_get(lit_pixels) + output_hash["litpixels.intensity"] = maybe_get(intensity) + output_hash["litpixels.unmasked"] = maybe_get(unmasked_pixels) + output_hash["litpixels.total"] = maybe_get(np.full_like( + lit_pixels, self._ss_asic_size * self._fs_asic_size)) diff --git 
a/src/calng/correction_addons/peakfinder9.py b/src/calng/correction_addons/peakfinder9.py index b4694ce21df9eb72d41a16f4a7b78e487e799733..1647350cb0e7e7a9cb567d99d77159e966a76d52 100644 --- a/src/calng/correction_addons/peakfinder9.py +++ b/src/calng/correction_addons/peakfinder9.py @@ -124,7 +124,7 @@ class Peakfinder9(BaseCorrectionAddon): .commit(), ) - def post_correction(self, data, train_id, cell_table, pulse_table, output_hash): + def post_correction(self, train_id, data, cell_table, pulse_table, output_hash): # assumes processed data shape is frames, pixels, pixels if self._input_shape != data.shape: try: @@ -169,7 +169,8 @@ class Peakfinder9(BaseCorrectionAddon): except AttributeError: pass - def __init__(self, config): + def __init__(self, device, prefix, config): + super().__init__(device, prefix, config) global cupy import cupy diff --git a/src/calng/correction_addons/random_frames.py b/src/calng/correction_addons/random_frames.py index d3447710323565099626cfa1bc29e6533b1ed172..bfbb78cf2a12681133081d5fc0c6c506543849a2 100644 --- a/src/calng/correction_addons/random_frames.py +++ b/src/calng/correction_addons/random_frames.py @@ -30,7 +30,8 @@ class RandomFrames(BaseCorrectionAddon): .commit(), ) - def __init__(self, config): + def __init__(self, device, prefix, config): + super().__init__(device, prefix, config) # TODO: figure out why no / 100 here... self._probability = config["probability"] diff --git a/src/calng/correction_addons/saturation_monitor.py b/src/calng/correction_addons/saturation_monitor.py index 1c48a2196e06496db11a9208f3266f15f2afb259..31bcec59791bbe7957a9ee1830ffb48eb10829c2 100644 --- a/src/calng/correction_addons/saturation_monitor.py +++ b/src/calng/correction_addons/saturation_monitor.py @@ -87,7 +87,8 @@ def saturation_monitoring_schema(schema=None): class SaturationMonitor(BaseCorrectionAddon): - def __init__(self, config): + def __init__(self, device, prefix, config): + super().__init__(device, prefix, config) global cupy import cupy self._alarmThreshold = config["alarmThreshold"] diff --git a/src/calng/corrections/AgipdCorrection.py b/src/calng/corrections/AgipdCorrection.py index 17b37ac04c427c0e1e88930d96d122cbd10d7302..75a2032c8f77c218b789ec97c7acc2b4dc3b8639 100644 --- a/src/calng/corrections/AgipdCorrection.py +++ b/src/calng/corrections/AgipdCorrection.py @@ -10,13 +10,14 @@ from karabo.bound import ( OVERWRITE_ELEMENT, STRING_ELEMENT, UINT32_ELEMENT, + MetricPrefix, + Unit, ) from .. 
import ( base_calcat, base_correction, base_kernel_runner, - schemas, utils, ) @@ -45,13 +46,18 @@ bad_pixel_constants = { # from pycalibration's enum.py -class GainModes(enum.IntEnum): +class GainMode(utils.OptionsEnum): ADAPTIVE_GAIN = 0 FIXED_HIGH_GAIN = 1 FIXED_MEDIUM_GAIN = 2 FIXED_LOW_GAIN = 3 +class GainSetting(utils.OptionsEnum): + LOW_CDS = 0 + HIGH_CDS = 1 + + # note: if this is extended further, bump up the dtype passed to kernel class CorrectionFlags(enum.IntFlag): NONE = 0 @@ -321,11 +327,13 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner): ) if config.has("constantParameters.gainMode"): - gain_mode = GainModes[config["constantParameters.gainMode"]] - if gain_mode is GainModes.ADAPTIVE_GAIN: - self.default_gain = self._xp.uint8(gain_mode) + gain_mode = GainMode.parse_helpful_format( + config["constantParameters.gainMode"] + ) + if gain_mode is GainMode.ADAPTIVE_GAIN: + self.default_gain = self._xp.uint8(gain_mode.value) else: - self.default_gain = self._xp.uint8(gain_mode - 1) + self.default_gain = self._xp.uint8(gain_mode.value - 1) def expected_input_shape(self, num_frames): return ( @@ -549,7 +557,7 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner): class AgipdCpuRunner(AgipdBaseRunner): - _xp = np + runner_type = base_kernel_runner.KernelRunnerTypes.CPU def _correct(self, flags, image_data, cell_table, processed_data, gain_map): if flags & CorrectionFlags.COMMON_MODE: @@ -572,15 +580,13 @@ class AgipdCpuRunner(AgipdBaseRunner): processed_data, ) - def _pre_init(self): + def _post_init(self): from ..kernels import agipd_cython self.correction_kernel = agipd_cython.correct class AgipdGpuRunner(AgipdBaseRunner): - def _pre_init(self): - import cupy - self._xp = cupy + runner_type = base_kernel_runner.KernelRunnerTypes.GPU def _post_init(self): self.correction_kernel = self._xp.RawKernel( @@ -684,23 +690,31 @@ class AgipdCalcatFriend(base_calcat.BaseCalcatFriend): ( DOUBLE_ELEMENT(schema) .key("constantParameters.acquisitionRate") + .displayedName("Acquisition rate") .tags("managed") + .unit(Unit.HERTZ) + .metricPrefix(MetricPrefix.MEGA) .assignmentOptional() .defaultValue(1.1) .reconfigurable() .commit(), - DOUBLE_ELEMENT(schema) + STRING_ELEMENT(schema) .key("constantParameters.gainSetting") .tags("managed") + .displayedName("Gain setting") .assignmentOptional() - .defaultValue(0) + .defaultValue(GainSetting.LOW_CDS.format_helpful()) + .options(GainSetting.list_options()) .reconfigurable() .commit(), DOUBLE_ELEMENT(schema) .key("constantParameters.photonEnergy") .tags("managed") + .displayedName("Source energy") + .unit(Unit.ELECTRONVOLT) + .metricPrefix(MetricPrefix.KILO) .assignmentOptional() .defaultValue(9.2) .reconfigurable() @@ -709,15 +723,18 @@ class AgipdCalcatFriend(base_calcat.BaseCalcatFriend): STRING_ELEMENT(schema) .key("constantParameters.gainMode") .tags("managed") + .displayedName("Gain mode") .assignmentOptional() - .defaultValue("ADAPTIVE_GAIN") - .options(",".join(gain_mode.name for gain_mode in GainModes)) + .defaultValue(GainMode.ADAPTIVE_GAIN.format_helpful()) + .options(GainMode.list_options()) .reconfigurable() .commit(), DOUBLE_ELEMENT(schema) .key("constantParameters.integrationTime") .tags("managed") + .displayedName("Integration time") + .unit(Unit.COUNT) # not directly * 10 ns .assignmentOptional() .defaultValue(12) .reconfigurable() @@ -739,12 +756,13 @@ class AgipdCalcatFriend(base_calcat.BaseCalcatFriend): if integration_time != 12: res["Integration Time"] = integration_time - gain_mode = 
GainModes[self._get_param("gainMode")] - if gain_mode is not GainModes.ADAPTIVE_GAIN: + gain_mode = GainMode.parse_helpful_format(self._get_param("gainMode")) + if gain_mode is not GainMode.ADAPTIVE_GAIN: res["Gain Mode"] = 1 - # TODO: make configurable whether or not to include gain setting? - res["Gain Setting"] = self._get_param("gainSetting") + res["Gain Setting"] = GainSetting.parse_helpful_format( + self._get_param("gainSetting") + ).value return res diff --git a/src/calng/corrections/DsscCorrection.py b/src/calng/corrections/DsscCorrection.py index b4a65c769f45a4634f7ea85b7fd0de428913f8e4..3199d2f9780ac9dd365f4b8579dfb46110550333 100644 --- a/src/calng/corrections/DsscCorrection.py +++ b/src/calng/corrections/DsscCorrection.py @@ -69,7 +69,7 @@ class DsscBaseRunner(base_kernel_runner.BaseKernelRunner): class DsscCpuRunner(DsscBaseRunner): - _xp = np + runner_type = base_kernel_runner.KernelRunnerTypes.CPU def _post_init(self): from ..kernels import dssc_cython @@ -86,9 +86,7 @@ class DsscCpuRunner(DsscBaseRunner): class DsscGpuRunner(DsscBaseRunner): - def _pre_init(self): - import cupy - self._xp = cupy + runner_type = base_kernel_runner.KernelRunnerTypes.GPU def _post_init(self): self.correction_kernel = self._xp.RawModule( diff --git a/src/calng/corrections/Epix100Correction.py b/src/calng/corrections/Epix100Correction.py index a469c0ee9cf500bebdbfadf62d3d44667afda390..3d3564d38f962d9692a1ef45d83bba9bb8fc1d3d 100644 --- a/src/calng/corrections/Epix100Correction.py +++ b/src/calng/corrections/Epix100Correction.py @@ -8,6 +8,8 @@ from karabo.bound import ( KARABO_CLASSINFO, OUTPUT_CHANNEL, OVERWRITE_ELEMENT, + MetricPrefix, + Unit, ) from .. import ( @@ -91,6 +93,8 @@ class Epix100CalcatFriend(base_calcat.BaseCalcatFriend): .key("constantParameters.integrationTime") .tags("managed") .displayedName("Integration Time") + .unit(Unit.SECOND) + .metricPrefix(MetricPrefix.MICRO) # according to pycalibration notebook .assignmentOptional() .defaultValue(10) .reconfigurable() @@ -146,6 +150,7 @@ class Epix100CalcatFriend(base_calcat.BaseCalcatFriend): class Epix100CpuRunner(base_kernel_runner.BaseKernelRunner): + runner_type = base_kernel_runner.KernelRunnerTypes.CPU _correction_steps = correction_steps _correction_flag_class = CorrectionFlags _bad_pixel_constants = {Constants.BadPixelsDarkEPix100} diff --git a/src/calng/corrections/Gotthard2Correction.py b/src/calng/corrections/Gotthard2Correction.py index 84dc79e61d46e5b6b9ddec35935358945d2dfb1a..d5dccab406f6e05157f6c134df1634e71681f03b 100644 --- a/src/calng/corrections/Gotthard2Correction.py +++ b/src/calng/corrections/Gotthard2Correction.py @@ -6,6 +6,8 @@ from karabo.bound import ( KARABO_CLASSINFO, OUTPUT_CHANNEL, OVERWRITE_ELEMENT, + MetricPrefix, + Unit, ) from .. 
import ( @@ -72,6 +74,7 @@ def framesums_reduction_fun(data, cell_table, pulse_table, warn_fun): class Gotthard2CpuRunner(base_kernel_runner.BaseKernelRunner): + runner_type = base_kernel_runner.KernelRunnerTypes.CPU _bad_pixel_constants = bad_pixel_constants _correction_steps = correction_steps _correction_flag_class = CorrectionFlags @@ -239,6 +242,8 @@ class Gotthard2CalcatFriend(base_calcat.BaseCalcatFriend): DOUBLE_ELEMENT(schema) .key("constantParameters.acquisitionRate") .tags("managed") + .unit(Unit.HERTZ) + .metricPrefix(MetricPrefix.MEGA) .assignmentOptional() .defaultValue(1.1) .reconfigurable() diff --git a/src/calng/corrections/JungfrauCorrection.py b/src/calng/corrections/JungfrauCorrection.py index 1d9cb0c4c79e7e43fbc585a62367b88f35377143..465587319511f45eb5c59ccda01b410a09fda710 100644 --- a/src/calng/corrections/JungfrauCorrection.py +++ b/src/calng/corrections/JungfrauCorrection.py @@ -9,6 +9,8 @@ from karabo.bound import ( OUTPUT_CHANNEL, OVERWRITE_ELEMENT, STRING_ELEMENT, + MetricPrefix, + Unit, ) from .. import ( @@ -36,12 +38,12 @@ bad_pixel_constants = { # from pycalibration (TOOD: move to common shared lib) -class GainModes(enum.Enum): +class GainMode(utils.OptionsEnum): ADAPTIVE_GAIN = 0 FIXED_GAIN = 1 -class GainSettings(enum.Enum): +class GainSetting(utils.OptionsEnum): LOW_CDS = 0 HIGH_CDS = 1 @@ -205,10 +207,7 @@ class JungfrauBaseRunner(base_kernel_runner.BaseKernelRunner): class JungfrauGpuRunner(JungfrauBaseRunner): - def _pre_init(self): - import cupy as cp - - self._xp = cp + runner_type = base_kernel_runner.KernelRunnerTypes.GPU def _post_init(self): source_module = self._xp.RawModule( @@ -260,9 +259,9 @@ class JungfrauGpuRunner(JungfrauBaseRunner): class JungfrauCpuRunner(JungfrauBaseRunner): - _xp = np + runner_type = base_kernel_runner.KernelRunnerTypes.CPU - def _pre_init(self): + def _post_init(self): # for computing previews faster self.thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=16) @@ -327,8 +326,8 @@ class JungfrauCalcatFriend(base_calcat.BaseCalcatFriend): return { Constants.Offset10Hz: self.dark_condition, Constants.BadPixelsDark10Hz: self.dark_condition, - Constants.BadPixelsFF10Hz: self.dark_condition, - Constants.RelativeGain10Hz: self.dark_condition, + Constants.BadPixelsFF10Hz: self.base_condition, + Constants.RelativeGain10Hz: self.base_condition, } @staticmethod @@ -366,16 +365,35 @@ class JungfrauCalcatFriend(base_calcat.BaseCalcatFriend): .key("constantParameters.integrationTime") .tags("managed") .displayedName("Integration time") - .description("Integration time in ms") + .unit(Unit.SECOND) + .metricPrefix(MetricPrefix.MICRO) + .description( + "Note that the control device specifies integration time in seconds " + "and we here expect microseconds. This is because injected constant " + "conditions use microseconds - despite CalCat thinking the parameter " + "is in seconds." 
+ ) .assignmentOptional() .defaultValue(350) .reconfigurable() .commit(), + DOUBLE_ELEMENT(schema) + .key("constantParameters.exposureTimeout") + .tags("managed") + .displayedName("Exposure timeout") + .unit(Unit.SECOND) + .metricPrefix(MetricPrefix.NANO) + .assignmentOptional() + .defaultValue(25) + .reconfigurable() + .commit(), + DOUBLE_ELEMENT(schema) .key("constantParameters.sensorTemperature") .tags("managed") .displayedName("Sensor temperature") + .unit(Unit.KELVIN) .description("Sensor temperature in K") .assignmentOptional() .defaultValue(291) @@ -392,9 +410,10 @@ class JungfrauCalcatFriend(base_calcat.BaseCalcatFriend): "dynamic gain with HG0 (0 otherwise) and 'Gain mode' which is 1 " "for fixed gain (omitted otherwise)." ) + .unit(Unit.NUMBER) .assignmentOptional() - .defaultValue(GainModes.ADAPTIVE_GAIN.name) - .options(",".join(gain_mode.name for gain_mode in GainModes)) + .defaultValue(GainMode.ADAPTIVE_GAIN.format_helpful()) + .options(GainMode.list_options()) .reconfigurable() .commit(), @@ -404,15 +423,15 @@ class JungfrauCalcatFriend(base_calcat.BaseCalcatFriend): .displayedName("Gain setting") .description("See description of gainMode") .assignmentOptional() - .defaultValue(GainSettings.LOW_CDS.name) - .options(",".join(gain_setting.name for gain_setting in GainSettings)) + .defaultValue(GainSetting.LOW_CDS.format_helpful()) + .options(GainSetting.list_options()) .reconfigurable() .commit(), ) base_calcat.add_status_schema_from_enum(schema, Constants) - def dark_condition(self): + def base_condition(self): res = base_calcat.OperatingConditions() res["Memory cells"] = self._get_param("memoryCells") res["Sensor Bias Voltage"] = self._get_param("biasVoltage") @@ -422,12 +441,23 @@ class JungfrauCalcatFriend(base_calcat.BaseCalcatFriend): res["Sensor Temperature"] = self._get_param("sensorTemperature") if ( - gain_mode := GainModes[self._get_param("gainMode")] - ) is not GainModes.ADAPTIVE_GAIN: + gain_mode := GainMode.parse_helpful_format(self._get_param("gainMode")) + ) is not GainMode.ADAPTIVE_GAIN: # NOTE: currently only including if parameter for CalCat is 1 # change if conditions are tidied up in the database res["Gain mode"] = gain_mode.value - res["Gain Setting"] = GainSettings[self._get_param("gainSetting")].value + + res["Gain Setting"] = GainSetting.parse_helpful_format( + self._get_param("gainSetting") + ).value + + return res + + def dark_condition(self): + res = self.base_condition() + + if (exposure_timeout := self._get_param("exposureTimeout")) != 25: + res["Exposure timeout"] = exposure_timeout return res diff --git a/src/calng/corrections/LpdCorrection.py b/src/calng/corrections/LpdCorrection.py index 325f5b2193de7abe0190b99b8ee26ced09358283..8e77e95593dcd594646b8735ea4029b6d4d5fc43 100644 --- a/src/calng/corrections/LpdCorrection.py +++ b/src/calng/corrections/LpdCorrection.py @@ -8,6 +8,8 @@ from karabo.bound import ( OUTPUT_CHANNEL, OVERWRITE_ELEMENT, STRING_ELEMENT, + MetricPrefix, + Unit, ) from .. 
import ( @@ -139,9 +141,7 @@ class LpdBaseRunner(base_kernel_runner.BaseKernelRunner): class LpdGpuRunner(LpdBaseRunner): - def _pre_init(self): - import cupy - self._xp = cupy + runner_type = base_kernel_runner.KernelRunnerTypes.GPU def _post_init(self): self.correction_kernel = self._xp.RawModule( @@ -185,7 +185,7 @@ class LpdGpuRunner(LpdBaseRunner): class LpdCpuRunner(LpdBaseRunner): - _xp = np + runner_type = base_kernel_runner.KernelRunnerTypes.CPU def _post_init(self): from ..kernels import lpd_cython @@ -251,6 +251,8 @@ class LpdCalcatFriend(base_calcat.BaseCalcatFriend): DOUBLE_ELEMENT(schema) .key("constantParameters.feedbackCapacitor") .tags("managed") + .unit(Unit.FARAD) + .metricPrefix(MetricPrefix.PICO) # according to CalCat .assignmentOptional() .defaultValue(5) .reconfigurable() @@ -259,6 +261,8 @@ class LpdCalcatFriend(base_calcat.BaseCalcatFriend): DOUBLE_ELEMENT(schema) .key("constantParameters.photonEnergy") .tags("managed") + .unit(Unit.ELECTRONVOLT) + .metricPrefix(MetricPrefix.KILO) .assignmentOptional() .defaultValue(9.3) .reconfigurable() @@ -268,6 +272,7 @@ class LpdCalcatFriend(base_calcat.BaseCalcatFriend): .key("constantParameters.category") .tags("managed") .displayedName("Category") + .unit(Unit.NUMBER) .assignmentOptional() .defaultValue(0) .reconfigurable() diff --git a/src/calng/corrections/LpdminiCorrection.py b/src/calng/corrections/LpdminiCorrection.py index 4a5d5c24582427c429b923a73bf69bdf373a1998..c30cbb727a8685e33b1525f9efa2efb041229f00 100644 --- a/src/calng/corrections/LpdminiCorrection.py +++ b/src/calng/corrections/LpdminiCorrection.py @@ -5,11 +5,13 @@ from karabo.bound import ( OVERWRITE_ELEMENT, ) +from .. import base_kernel_runner from .._version import version as deviceVersion from . import LpdCorrection class LpdminiGpuRunner(LpdCorrection.LpdGpuRunner): + runner_type = base_kernel_runner.KernelRunnerTypes.GPU num_pixels_ss = 32 def load_constant(self, constant_type, constant_data): diff --git a/src/calng/corrections/PnccdCorrection.py b/src/calng/corrections/PnccdCorrection.py index 65fc76c5e48778c884d4ddb0c9145da3c8860bc2..b285c5e17b5a85b093e06976b5e5d85779ccec7f 100644 --- a/src/calng/corrections/PnccdCorrection.py +++ b/src/calng/corrections/PnccdCorrection.py @@ -8,6 +8,8 @@ from karabo.bound import ( KARABO_CLASSINFO, OUTPUT_CHANNEL, OVERWRITE_ELEMENT, + MetricPrefix, + Unit, ) from .. import ( @@ -99,6 +101,8 @@ class PnccdCalcatFriend(base_calcat.BaseCalcatFriend): .key("constantParameters.integrationTime") .tags("managed") .displayedName("Integration Time") + .unit(Unit.SECOND) + .metricPrefix(MetricPrefix.MICRO) # NOTE: assumption, does anyone know? 
.assignmentOptional() .defaultValue(70) .reconfigurable() @@ -107,6 +111,7 @@ class PnccdCalcatFriend(base_calcat.BaseCalcatFriend): DOUBLE_ELEMENT(schema) .key("constantParameters.sensorTemperature") .tags("managed") + .unit(Unit.KELVIN) .assignmentOptional() .defaultValue(233) .reconfigurable() @@ -116,6 +121,7 @@ class PnccdCalcatFriend(base_calcat.BaseCalcatFriend): .key("constantParameters.gainSetting") .tags("managed") .displayedName("Gain Setting") + .unit(Unit.NUMBER) .assignmentOptional() .defaultValue(16) .reconfigurable() @@ -125,6 +131,8 @@ class PnccdCalcatFriend(base_calcat.BaseCalcatFriend): .key("constantParameters.sourceEnergy") .tags("managed") .displayedName("Source Energy") + .unit(Unit.ELECTRONVOLT) + .metricPrefix(MetricPrefix.KILO) .assignmentOptional() .defaultValue(1.6) .reconfigurable() @@ -155,6 +163,7 @@ class PnccdCalcatFriend(base_calcat.BaseCalcatFriend): class PnccdCpuRunner(base_kernel_runner.BaseKernelRunner): + runner_type = base_kernel_runner.KernelRunnerTypes.CPU _correction_steps = correction_steps _correction_flag_class = CorrectionFlags _bad_pixel_constants = {Constants.BadPixelsDarkCCD} diff --git a/src/calng/frameselection_utils.py b/src/calng/frameselection_utils.py index f6b96c59c9c499aec5b57c73206af01fba9a4e3a..9b444e56eed897d6781d3c24b5dcfad31baa4a22 100644 --- a/src/calng/frameselection_utils.py +++ b/src/calng/frameselection_utils.py @@ -4,11 +4,45 @@ from karabo.bound import ( BOOL_ELEMENT, NODE_ELEMENT, STRING_ELEMENT, + TABLE_ELEMENT, VECTOR_STRING_ELEMENT, Hash, + Schema, + VectorHash, ) import numpy as np +from . import redu_op + + +def reduction_classes_schema(): + schema = Schema() + ( + STRING_ELEMENT(schema) + .key("operation") + .displayedName("Operation") + .readOnly() + .commit(), + + STRING_ELEMENT(schema) + .key("argument") + .displayedName("Key to reduce") + .readOnly() + .commit(), + + STRING_ELEMENT(schema) + .key("selection") + .displayedName("Selection Name") + .readOnly() + .commit(), + + STRING_ELEMENT(schema).key("result") + .displayedName("Result Key") + .readOnly() + .commit(), + ) + return schema + class FrameselectionFriend: @staticmethod @@ -43,7 +77,7 @@ class FrameselectionFriend: .key("frameSelector.dataSourcePattern") .displayedName("Data source pattern") .description( - "Source name pattern to apply frame selection to. Should match " + "Source name pattern to apply frame selection to. Should match " "subset of matched sources." 
) .assignmentOptional() @@ -59,15 +93,27 @@ class FrameselectionFriend: .defaultValue([]) .reconfigurable() .commit(), + + TABLE_ELEMENT(schema).key("frameSelector.reduction") + .displayedName("Reduction") + .description( + "The reduction operations on selected images according to " + "the input from Arbiter.") + .setColumns(reduction_classes_schema()) + .readOnly().initialValue([]) + .commit(), ) - def __init__(self, config): + def __init__(self, device, config): + self._device = device self._config = Hash() self._enabled = False self._arbiter = "" self._data_pattern = "" self._data_keys = [] + self._reduction_classes = {} self.reconfigure(config) + self._device.KARABO_SLOT(self.receive_reduction_classes) def reconfigure(self, config): self._config.merge(config) @@ -76,6 +122,20 @@ class FrameselectionFriend: self._source_pattern = re.compile(self._config.get("dataSourcePattern")) self._data_keys = list(self._config.get("dataKeys")) + def receive_reduction_classes(self, redu_classes): + reduction = VectorHash() + self._reduction_classes = {} + for encoded_class in redu_classes: + name, op, arg, result = encoded_class.split(':') + reduction.append(Hash( + "operation", op, + "argument", arg, + "selection", name, + "result", result, + )) + self._reduction_classes[result] = (op, arg, name) + self._device.set("frameSelector.reduction", reduction) + def get_mask(self, sources): if self._enabled and self._arbiter in sources: return np.array( @@ -96,3 +156,37 @@ class FrameselectionFriend: continue data_hash[key] = data_hash[key][mask] + + def resolve_reduction_classes(self, sources): + correction_sources = { + source: data + for source, (data, timestamp) in sources.items() + if self._source_pattern.match(source) + } + operations = [] + if self._enabled and self._arbiter in sources: + arbiter = sources[self._arbiter][0] + for result, (op, arg, selection) in self._reduction_classes.items(): + mask_key = f"{selection}.mask" + if not arbiter.has(mask_key): + # no mask + continue + mask = np.array( + arbiter[mask_key], dtype=bool, copy=False) + + for source, data in correction_sources.items(): + if not data.has(arg): + # no array to reduce + continue + if data[arg].shape[0] != mask.size: + self._device.log.WARN( + f"The reduction mask '{selection}' does not match the '{arg}' size" + ) + continue + + op_class = getattr(redu_op, op, None) + if op_class is None: + # unknown operation + continue + operations.append(op_class(data, arg, mask, result)) + return operations diff --git a/src/calng/kernels/peakfinder9_gpu.cu b/src/calng/kernels/peakfinder9_gpu.cu index ff65d2ea5f2dd68489b06be1d72172aabfdfd398..7d172c9eacfc49e5b333e7f6306e9b5a7b948c51 100644 --- a/src/calng/kernels/peakfinder9_gpu.cu +++ b/src/calng/kernels/peakfinder9_gpu.cu @@ -149,7 +149,7 @@ extern "C" __global__ void pf9(const unsigned short num_frames, // whole peak should have sufficent SNR float peak_weighted_row; float peak_weighted_col; - float peak_total_mass = pixel_val; + float peak_total_mass = pixel_val - mean; { /* TODO: more compact form */ float peak_weighted_row_nom = static_cast<float>(row) * pixel_val; @@ -159,9 +159,10 @@ extern "C" __global__ void pf9(const unsigned short num_frames, float total_mass_before = peak_total_mass; masked_frame.fun_ring(row, col, layer, [&] (unsigned short i, unsigned short j, float val) { if (val > peak_pixel_threshold) { - peak_total_mass += val; - peak_weighted_row_nom += val * static_cast<float>(i); - peak_weighted_col_nom += val * static_cast<float>(j); + float val_over_mean = val - mean; + 
peak_total_mass += val_over_mean; + peak_weighted_row_nom += val_over_mean * static_cast<float>(i); + peak_weighted_col_nom += val_over_mean * static_cast<float>(j); } }); // in case nothing was added, stop expanding @@ -172,8 +173,13 @@ if (peak_total_mass <= mean + min_snr_whole_peak * sigma) { return; } - peak_weighted_row = peak_weighted_row_nom / peak_total_mass; - peak_weighted_col = peak_weighted_col_nom / peak_total_mass; + if (peak_total_mass == 0) { + peak_weighted_row = static_cast<float>(row); + peak_weighted_col = static_cast<float>(col); + } else { + peak_weighted_row = peak_weighted_row_nom / peak_total_mass; + peak_weighted_col = peak_weighted_col_nom / peak_total_mass; + } } unsigned int output_index = atomicInc(output_counts + frame, max_peaks); diff --git a/src/calng/preview_utils.py b/src/calng/preview_utils.py index 12cb7e183a13133824a7964b852b84a524bf9e8b..4b82d15e14d7886920d825e166b1afb3cf2431da 100644 --- a/src/calng/preview_utils.py +++ b/src/calng/preview_utils.py @@ -261,6 +261,8 @@ class PreviewFriend: ) elif not inplace: data = data.copy() + if spec.swap_axes: + data = np.swapaxes(data, -1, -2) if spec.flip_ss: data = np.flip(data, -2) if spec.flip_fs: @@ -322,7 +324,7 @@ class PreviewFriend: if subconf.has("flipFS"): spec.flip_fs = subconf["flipFS"] if subconf.has("swapAxes"): - spec.flip_fs = subconf["flipFS"] + spec.swap_axes = subconf["swapAxes"] if subconf.has("replaceNanWith"): spec.nan_replacement = subconf["replaceNanWith"] diff --git a/src/calng/redu_op.py b/src/calng/redu_op.py new file mode 100644 index 0000000000000000000000000000000000000000..31b8226120e134261347101611aff66aab7aac5d --- /dev/null +++ b/src/calng/redu_op.py @@ -0,0 +1,47 @@ +import numpy as np + + +class ReductionOperation: + def __init__(self, data, arg, mask, result_key): + self.data = data + self.arg = arg + self.mask = mask + self.result_key = result_key + + def __call__(self): + result_arr = self.reduce() + if result_arr is not None: + self.data[f"reduce.{self.result_key}"] = result_arr + + def reduce(self): + return None + + +class count(ReductionOperation): + def reduce(self): + arr = self.data[self.arg][self.mask] + return np.sum(np.isfinite(arr), axis=0, initial=0) + + +class sum(ReductionOperation): + def reduce(self): + arr = self.data[self.arg][self.mask] + return np.nansum(arr, axis=0, initial=0) + + +class sumsq(ReductionOperation): + def reduce(self): + arr = self.data[self.arg][self.mask] + return np.nansum(arr * arr, axis=0, initial=0) + + +class max(ReductionOperation): + def reduce(self): + arr = self.data[self.arg][self.mask] + return np.max(arr, axis=0, initial=-np.inf) + + +class min(ReductionOperation): + def reduce(self): + arr = self.data[self.arg][self.mask] + return np.min(arr, axis=0, initial=np.inf) diff --git a/src/calng/scenes.py b/src/calng/scenes.py index 04b7a183d011e27a99ceac4a44c7822b3a9ccf25..30b294204d502a0b5f36e8cc715bf0ce3ecfaeaa 100644 --- a/src/calng/scenes.py +++ b/src/calng/scenes.py @@ -398,7 +398,7 @@ class CompactDeviceLinkList(VerticalLayout): @titled("Assembler status", width=8 * NARROW_INC) @boxed class AssemblerDeviceStatus(VerticalLayout): - def __init__(self, device_id, geometry_device_id): + def __init__(self, device_id, geometry_device_id=None): super().__init__(padding=0) name = DisplayLabelModel( keys=[f"{device_id}.deviceId"], @@ -447,36 +447,41 @@ class AssemblerDeviceStatus(VerticalLayout): width=14 * BASE_INC, height=BASE_INC, ), - LabelModel( - 
text="My geometry device:", - width=14 * BASE_INC, - height=BASE_INC, - ), - # note: link based on current value when generating - DeviceSceneLinkModel( - text=geometry_device_id, - keys=[f"{geometry_device_id}.availableScenes"], - target="overview", - target_window=SceneTargetWindow.Dialog, - width=14 * BASE_INC, - height=BASE_INC, - ), - HorizontalLayout( + ] + ) + if geometry_device_id: + self.children.extend( + [ LabelModel( - text="My geometry:", - width=7 * BASE_INC, + text="My geometry device:", + width=14 * BASE_INC, height=BASE_INC, ), - DisplayStateColorModel( - show_string=True, - keys=[f"{device_id}.geometryState"], - width=7 * BASE_INC, + # note: link based on current value when generating + DeviceSceneLinkModel( + text=geometry_device_id, + keys=[f"{geometry_device_id}.availableScenes"], + target="overview", + target_window=SceneTargetWindow.Dialog, + width=14 * BASE_INC, height=BASE_INC, ), - padding=0, - ), - ] - ) + HorizontalLayout( + LabelModel( + text="My geometry:", + width=7 * BASE_INC, + height=BASE_INC, + ), + DisplayStateColorModel( + show_string=True, + keys=[f"{device_id}.geometryState"], + width=7 * BASE_INC, + height=BASE_INC, + ), + padding=0, + ), + ] + ) @titled("Device status", width=8 * NARROW_INC) @@ -592,35 +597,44 @@ class RoiBox(VerticalLayout): class PreviewSettings(HorizontalLayout): def __init__(self, device_id, schema_hash, node_name): super().__init__() - tweaks = VerticalLayout( + tweaks = [ EditableRow( device_id, schema_hash, f"{node_name}.replaceNanWith", 6, 4, - ), - ) + ) + ] + flips = [] if schema_hash.has(f"{node_name}.flipSS"): - tweaks.children.append( - HorizontalLayout( - EditableRow( - device_id, - schema_hash, - f"{node_name}.flipSS", - 3, - 2, - ), - EditableRow( - device_id, - schema_hash, - f"{node_name}.flipFS", - 3, - 2, - ), - padding=0, + tweaks.append( + EditableRow( + device_id, + schema_hash, + f"{node_name}.swapAxes", + 6, + 4, + ) + ) + flips.append( + EditableRow( + device_id, + schema_hash, + f"{node_name}.flipSS", + 3, + 2, ) ) + flips.append( + EditableRow( + device_id, + schema_hash, + f"{node_name}.flipFS", + 3, + 2, + ) + ) downsampling = VerticalLayout( LabelModel( text="Downsampling", @@ -644,7 +658,7 @@ class PreviewSettings(HorizontalLayout): ) self.children.extend( [ - tweaks, + VerticalLayout(children=tweaks + [HorizontalLayout(children=flips)]), Vline(height=3 * BASE_INC), downsampling, ] diff --git a/src/calng/utils.py b/src/calng/utils.py index c955d80b38d4b9f01cfce1c6e799fa691695512a..64da3d3dc1325bb1c531f2a9984b9ba6c73e5860 100644 --- a/src/calng/utils.py +++ b/src/calng/utils.py @@ -284,3 +284,24 @@ def maybe_get(a, out=None): else: np.copyto(out, a) return out + + +class OptionsEnum(enum.Enum): + """Subclass of enum.Enum only intended to conveniently enumerate stringly options + for Karabo descriptors. See examples like GainSetting or GainMode: the .value will + be a magic number used in CalCat, the name refers to "the actual" detector operation + mode, and format_helpful / parse_helpful_format are tricks employed such that both + are exposed to the operator. 
Life is full of compromise.""" + def format_helpful(self): + return f"{self.value}: {self.name}" + + @classmethod + def parse_helpful_format(cls, string): + value, separator, name = string.partition(": ") + if not value or not separator or not name: + raise ValueError(f"Cannot parse '{string}' as helpful enum format") + return cls[name] + + @classmethod + def list_options(cls): + return [member.format_helpful() for member in cls] diff --git a/tests/test_utils.py b/tests/test_utils.py index 8864e87cfddd43886d5b0fa21fa0d2e8b06a4f90..24aa5fb374213b31c070824afe544a1f54e8a628 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,4 +1,6 @@ import numpy as np +import pytest + from calng import utils @@ -16,3 +18,22 @@ def test_get_c_type(): assert utils.np_dtype_to_c_type(np.int16) == "short" assert utils.np_dtype_to_c_type(np.int32) == "int" assert utils.np_dtype_to_c_type(np.int64) == "long" + + +def test_options_enum(): + class SomeFunnyOptions(utils.OptionsEnum): + A = 0 + B = 10 + any_identifier_should_work = -1 + + options = SomeFunnyOptions.list_options() + assert len(options) == 3 + parsed = [SomeFunnyOptions.parse_helpful_format(opt) for opt in options] + assert list(SomeFunnyOptions) == parsed + + with pytest.raises(ValueError): + SomeFunnyOptions.parse_helpful_format("I don't belong") + + # I don't have strong feelings about KeyError vs ValueError here + with pytest.raises(KeyError): + SomeFunnyOptions.parse_helpful_format("100: C")
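The new redu_op module and FrameselectionFriend.resolve_reduction_classes share a small contract: each reduction class arriving via the receive_reduction_classes slot names an operation, a key to reduce, a selection (whose arbiter mask is read from "<selection>.mask"), and a result key, and the resulting operation object writes its output back into the source data under a "reduce." prefix. The snippet below is a minimal, standalone sketch of that contract; the names "image.data", "litpixels" and "intensity" are made up for illustration, and a plain dict stands in for the Karabo Hash that the devices actually pass in.

import numpy as np

from calng import redu_op

# made-up source data: 4 frames of 3 pixels each (stand-in for a Karabo Hash)
data = {"image.data": np.arange(12, dtype=np.float32).reshape(4, 3)}
# frame selection mask as it would arrive from the arbiter under "<selection>.mask"
mask = np.array([True, False, True, True])

# corresponds to an encoded reduction class "litpixels:sum:image.data:intensity"
op = redu_op.sum(data, "image.data", mask, "intensity")
op()  # reduces over the selected frames and stores the result

print(data["reduce.intensity"])  # per-pixel sum over the three selected frames

Keeping results under the "reduce." prefix lets downstream consumers pick up reduced quantities without colliding with the original source keys.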