diff --git a/DEPENDS b/DEPENDS
index d0cfeeb1e744adbe506b5f138923b48b0a0fe305..c014e21c367ad7d4c85ac5bd71f7407a8b7f217a 100644
--- a/DEPENDS
+++ b/DEPENDS
@@ -1,2 +1,4 @@
 TrainMatcher, 2.4.3
 calibrationClient, 11.3.0
+geometryDevices 0.0.1
+calngUtils 0.0.1
diff --git a/Makefile b/Makefile
index 9927fdce856457b1e17948ec673630a6b4d1e915..2906fbfca206793fc531ea80ba4307fafc991ff6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 PYPI = pip install --index-url "https://devpi.exfldadev01.desy.de/root/pypi"
 PROXIED = pip install --proxy "http://exflproxy01.desy.de:3128"

-.PHONY: all cupy jinja2 h5py extra-geom calng
+.PHONY: all cupy jinja2 h5py xarray extra-geom posixshmem calng

 all: calng

@@ -11,10 +11,15 @@ cupy:
 jinja2:
 	$(PYPI) Jinja2==3.1.2

+# version: follow minimum used by EXtra-data
 h5py:
-	$(PYPI) h5py
+	$(PYPI) h5py>=2.10
+
+xarray:
+	$(PYPI) xarray

 extra-geom:
 	$(PYPI) extra_geom==1.11.0

 calng: cupy jinja2 h5py extra-geom
+	pip install --upgrade .
diff --git a/setup.py b/setup.py
index 5352ed13e566c92a5a0a73511471f064a19ef4be..970f30e3d104648731262b9636b12da6ab3d0f62 100644
--- a/setup.py
+++ b/setup.py
@@ -45,14 +45,6 @@ setup(
             "AgipdCondition = calng.conditions:AgipdCondition.AgipdCondition",
             "JungfrauCondition = calng.conditions.JungfrauCondition:JungfrauCondition",  # noqa
             "LpdCondition = calng.conditions.LpdCondition:LpdCondition",
-            "Agipd1MGeometry = calng.geometries.Agipd1MGeometry:Agipd1MGeometry",
-            "Agipd500KGeometry = calng.geometries.Agipd500KGeometry:Agipd500KGeometry",
-            "Dssc1MGeometry = calng.geometries:Dssc1MGeometry.Dssc1MGeometry",
-            "Epix100Geometry = calng.geometries:Epix100Geometry.Epix100Geometry",
-            "JungfrauGeometry = calng.geometries:JungfrauGeometry.JungfrauGeometry",
-            "Lpd1MGeometry = calng.geometries:Lpd1MGeometry.Lpd1MGeometry",
-            "LpdminiGeometry = calng.geometries:LpdminiGeometry.LpdminiGeometry",
-            "PnccdGeometry = calng.geometries:PnccdGeometry.PnccdGeometry",
             "RoiTool = calng.RoiTool:RoiTool",
         ],
         "calng.correction_addon": [
diff --git a/src/calng/CalibrationManager.py b/src/calng/CalibrationManager.py
index e8a5ed7f8b46005570595391f663b89828f17647..ab4eb34d8e1883a1e1987c49d706a7026d48b5c0 100644
--- a/src/calng/CalibrationManager.py
+++ b/src/calng/CalibrationManager.py
@@ -27,8 +27,8 @@ from karabo.middlelayer import (
     get_property, getTopology, getConfiguration, getConfigurationFromPast,
     getConfigurationFromName)

-from ._version import version as deviceVersion
 from . import scenes
+from ._version import version as deviceVersion


 '''
diff --git a/src/calng/DetectorAssembler.py b/src/calng/DetectorAssembler.py
index 3080a591aa6a2607fdf9075a67f21b8d2f9bf52b..1f3fb09d0b6c795f4c14903f5e5ae227c56c49a3 100644
--- a/src/calng/DetectorAssembler.py
+++ b/src/calng/DetectorAssembler.py
@@ -1,12 +1,16 @@
 import enum
-from timeit import default_timer
 import re
+from timeit import default_timer

+import numpy as np
+import xarray as xr
+from calngUtils import device as device_utils, trackers
+from geometryDevices import utils as geom_utils
 from karabo.bound import (
     BOOL_ELEMENT,
     DOUBLE_ELEMENT,
-    KARABO_CLASSINFO,
     INT32_ELEMENT,
+    KARABO_CLASSINFO,
     OUTPUT_CHANNEL,
     OVERWRITE_ELEMENT,
     STRING_ELEMENT,
@@ -21,11 +25,9 @@ from karabo.bound import (
     Trainstamp,
     Unit,
 )
-import numpy as np
-import xarray as xr

 from TrainMatcher import TrainMatcher

-from . import geom_utils, preview_utils, scenes, schemas, utils
+from . import preview_utils, scenes, schemas
 from ._version import version as deviceVersion

@@ -68,6 +70,7 @@ def module_index_schema():
     return schema


+@device_utils.with_unsafe_get
 @KARABO_CLASSINFO("DetectorAssembler", deviceVersion)
 class DetectorAssembler(TrainMatcher.TrainMatcher):
     @staticmethod
@@ -191,8 +194,10 @@ class DetectorAssembler(TrainMatcher.TrainMatcher):
     def __init__(self, conf):
         super().__init__(conf)
         self.info.merge(Hash("timeOfFlight", 0, "processingTime", 0))
-        self._tof_tracker = utils.ExponentialMovingAverage(alpha=0.3)
-        self._processing_time_tracker = utils.ExponentialMovingAverage(alpha=0.3)
+        self._tof_tracker = trackers.ExponentialMovingAverage(alpha=0.3)
+        self._processing_time_tracker = trackers.ExponentialMovingAverage(
+            alpha=0.3
+        )
         self.registerSlot(self.slotReceiveGeometry)

         self._need_to_update_source_index_mapping = True
@@ -496,6 +501,3 @@ class DetectorAssembler(TrainMatcher.TrainMatcher):
         if self._need_to_update_source_index_mapping:
             self._merge_source_to_index_from_regex()
             self._set_source_to_index_from_table()
-
-
-utils.add_unsafe_get(DetectorAssembler)
diff --git a/src/calng/FrameSelectionArbiter.py b/src/calng/FrameSelectionArbiter.py
index 34177312f249a9d682bcca4fbdd91f634bf69b14..21b8ddf10e7586980fac98c8c78179dbb33f3a26 100644
--- a/src/calng/FrameSelectionArbiter.py
+++ b/src/calng/FrameSelectionArbiter.py
@@ -1,5 +1,6 @@
 from importlib.metadata import entry_points

+from geometryDevices import utils as geom_utils
 from karabo.bound import (
     KARABO_CLASSINFO,
     NODE_ELEMENT,
@@ -13,11 +14,10 @@ from karabo.bound import (
     Timestamp,
     Trainstamp,
 )
-
 from TrainMatcher import TrainMatcher

+from . import utils
 from ._version import version as deviceVersion
-from . import utils, geom_utils
 from .arbiter_kernels.base_kernel import KernelWarning

 my_schema = Schema()
diff --git a/src/calng/Gotthard2Assembler.py b/src/calng/Gotthard2Assembler.py
index 03400c7c60482449c70c4605f0af62c04c5518cc..f80436296d1c664c59036cf94ce889888ad0a103 100644
--- a/src/calng/Gotthard2Assembler.py
+++ b/src/calng/Gotthard2Assembler.py
@@ -1,4 +1,5 @@
 import numpy as np
+from calngUtils import misc as utils, shmem_utils
 from karabo.bound import (
     KARABO_CLASSINFO,
     OUTPUT_CHANNEL,
@@ -18,7 +19,6 @@ from karabo.bound import (
 from TrainMatcher import TrainMatcher

 from . import scenes, schemas
-from . import shmem_utils, utils
 from ._version import version as deviceVersion


diff --git a/src/calng/LpdminiSplitter.py b/src/calng/LpdminiSplitter.py
index ed4c78a6086f1370716e9a0c2f402b1f3429a2fd..5c32d679e305c8586d40e93457721789602586c5 100644
--- a/src/calng/LpdminiSplitter.py
+++ b/src/calng/LpdminiSplitter.py
@@ -1,6 +1,7 @@
 from timeit import default_timer

 import numpy as np
+from calngUtils import shmem_utils, timing, trackers
 from karabo.bound import (
     DOUBLE_ELEMENT,
     INPUT_CHANNEL,
@@ -21,7 +22,7 @@ from karabo.bound import (
 )
 from karabo.common.api import KARABO_SCHEMA_DISPLAY_TYPE_SCENES as DT_SCENES

-from . import scenes, schemas, shmem_utils, utils
+from . 
import schemas, scenes from .base_correction import WarningLampType from ._version import version as deviceVersion @@ -131,10 +132,10 @@ class LpdminiSplitter(PythonDevice): # performance measures and such self._last_processing_started = 0 # used for processing time and timeout self._buffered_status_update = Hash() - self._processing_time_tracker = utils.ExponentialMovingAverage(alpha=0.3) - self._rate_tracker = utils.WindowRateTracker() - self._input_delay_tracker = utils.ExponentialMovingAverage(alpha=0.3) - self._performance_measure_update_timer = utils.RepeatingTimer( + self._processing_time_tracker = trackers.ExponentialMovingAverage(alpha=0.3) + self._rate_tracker = trackers.WindowRateTracker() + self._input_delay_tracker = trackers.ExponentialMovingAverage(alpha=0.3) + self._performance_measure_update_timer = timing.RepeatingTimer( interval=1, callback=self._update_performance_measures, ) diff --git a/src/calng/RoiTool.py b/src/calng/RoiTool.py index df7ddbbdde49dc27894dca362fa900d0045067bd..c450afa418720a103da3e21d20acaf58fc2fe7b8 100644 --- a/src/calng/RoiTool.py +++ b/src/calng/RoiTool.py @@ -1,7 +1,7 @@ import threading - import numpy as np +from calngUtils import trackers from karabo.middlelayer import ( AccessLevel, AccessMode, @@ -131,7 +131,7 @@ class RoiTool(Device): self._bins = None self._means = None self._bin_lock = threading.RLock() - self._rate_tracker = utils.WindowRateTracker() + self._rate_tracker = trackers.WindowRateTracker() async def onInitialization(self): self.state = State.ON diff --git a/src/calng/ShmemToZMQ.py b/src/calng/ShmemToZMQ.py index 1e0af8e14f5b3b71f1fa29db231e95370ac714a0..cde00af65e83e2a029ebb67ef135ba0886f37d7d 100644 --- a/src/calng/ShmemToZMQ.py +++ b/src/calng/ShmemToZMQ.py @@ -3,7 +3,7 @@ from time import time from karabo.bound import KARABO_CLASSINFO from PipeToZeroMQ import PipeToZeroMQ, conversion, device_schema -from . import shmem_utils +from calngUtils import shmem_utils from ._version import version as deviceVersion diff --git a/src/calng/ShmemTrainMatcher.py b/src/calng/ShmemTrainMatcher.py index 9fcbb7fe6ba8f15d1ba3d0f85f07323942ebbf37..d99df8d0d8beb7791bb297678ce96694eeba0d6c 100644 --- a/src/calng/ShmemTrainMatcher.py +++ b/src/calng/ShmemTrainMatcher.py @@ -1,6 +1,7 @@ import concurrent.futures from timeit import default_timer +from calngUtils import shmem_utils, trackers from karabo.bound import ( BOOL_ELEMENT, DOUBLE_ELEMENT, @@ -15,7 +16,6 @@ from karabo.bound import ( ) from TrainMatcher import TrainMatcher -from . 
import shmem_utils, utils from .stacking_utils import StackingFriend from .frameselection_utils import FrameselectionFriend from ._version import version as deviceVersion @@ -78,7 +78,7 @@ class ShmemTrainMatcher(TrainMatcher.TrainMatcher): from PipeToZeroMQ.utils import find_infiniband_ip config["output.hostname"] = find_infiniband_ip() - self._processing_time_tracker = utils.ExponentialMovingAverage(alpha=0.3) + self._processing_time_tracker = trackers.ExponentialMovingAverage(alpha=0.3) super().__init__(config) self.info.merge(Hash("processingTime", 0)) diff --git a/src/calng/base_calcat.py b/src/calng/base_calcat.py index 79e8db7d4e13827c1751cf04e076f441af45a257..8887e972c5d8c9371b3593fc01239d3eb8ad810d 100644 --- a/src/calng/base_calcat.py +++ b/src/calng/base_calcat.py @@ -16,6 +16,7 @@ from calibration_client.modules import ( DetectorType, PhysicalDetectorUnit, ) +from calngUtils import misc as utils, timing from karabo.bound import ( DOUBLE_ELEMENT, NODE_ELEMENT, @@ -26,8 +27,6 @@ from karabo.bound import ( Unit, ) -from . import utils - class ConditionNotFound(Exception): pass @@ -571,7 +570,7 @@ class BaseCalcatFriend: self._set_status(constant, "dataFilePath", str(file_path)) self._set_status(constant, "dataSetName", resp["data"]["data_set_name"]) - with utils.Stopwatch() as stopwatch: + with timing.Stopwatch() as stopwatch: constant_data = _read_dataset_externally( file_path, resp["data"]["data_set_name"] ) @@ -601,7 +600,7 @@ class BaseCalcatFriend: self._set_status(constant, "dataFilePath", str(file_path)) self._set_status(constant, "dataSetName", resp["data"]["data_set_name"]) - with utils.Stopwatch() as stopwatch: + with timing.Stopwatch() as stopwatch: constant_data = _read_dataset_externally( file_path, resp["data"]["data_set_name"] ) @@ -618,7 +617,7 @@ class BaseCalcatFriend: return constant_data def get_constant_from_file(self, constant): - with utils.Stopwatch() as stopwatch: + with timing.Stopwatch() as stopwatch: constant_data = _read_dataset_externally( self.device.get(f"foundConstants.{constant.name}.dataFilePath"), self.device.get(f"foundConstants.{constant.name}.dataSetName"), diff --git a/src/calng/base_condition.py b/src/calng/base_condition.py index 57e42b4d69bb095f40af17781ae1ec409a7d2607..11de77ff408ccbe3806b6a0b23d92236d3726413 100644 --- a/src/calng/base_condition.py +++ b/src/calng/base_condition.py @@ -24,8 +24,8 @@ from karabo.middlelayer import ( slot, waitUntilNew, ) -from ._version import version as deviceVersion from . import scenes +from ._version import version as deviceVersion class PipelineOperationMode(enum.Enum): diff --git a/src/calng/base_correction.py b/src/calng/base_correction.py index 7394e78c7bada37c680cbfdadc9a92142752cb0e..038e60eb823eb972542124aafd3473d0ca9b094f 100644 --- a/src/calng/base_correction.py +++ b/src/calng/base_correction.py @@ -2,14 +2,16 @@ import concurrent.futures import enum import functools import gc -from importlib.metadata import entry_points import math import pathlib import threading +from importlib.metadata import entry_points from timeit import default_timer import dateutil.parser import numpy as np +from geometryDevices import utils as geom_utils +from calngUtils import device as device_utils, misc, shmem_utils, timing, trackers from karabo.bound import ( BOOL_ELEMENT, DOUBLE_ELEMENT, @@ -36,7 +38,7 @@ from karabo.bound import ( ) from karabo.common.api import KARABO_SCHEMA_DISPLAY_TYPE_SCENES as DT_SCENES -from . import geom_utils, preview_utils, scenes, schemas, shmem_utils, utils +from . 
import preview_utils, schemas, scenes, utils from ._version import version as deviceVersion PROCESSING_STATE_TIMEOUT = 10 @@ -63,6 +65,7 @@ class WarningLampType(enum.Enum): TIMESERVER_CONNECTION = enum.auto() +@device_utils.with_unsafe_get @KARABO_CLASSINFO("BaseCorrection", deviceVersion) class BaseCorrection(PythonDevice): _available_addons = [] # classes, filled by add_addon_nodes using entry_points @@ -743,14 +746,18 @@ class BaseCorrection(PythonDevice): self._update_frame_filter() self._buffered_status_update = Hash() - self._processing_time_tracker = utils.ExponentialMovingAverage(alpha=0.3) - self._rate_tracker = utils.WindowRateTracker() - self._input_delay_tracker = utils.ExponentialMovingAverage(alpha=0.3) - self._rate_update_timer = utils.RepeatingTimer( + self._processing_time_tracker = trackers.ExponentialMovingAverage( + alpha=0.3 + ) + self._rate_tracker = trackers.WindowRateTracker() + self._input_delay_tracker = trackers.ExponentialMovingAverage( + alpha=0.3 + ) + self._rate_update_timer = timing.RepeatingTimer( interval=1, callback=self._update_rate_and_state, ) - self._train_ratio_tracker = utils.TrainRatioTracker() + self._train_ratio_tracker = trackers.TrainRatioTracker() self.KARABO_ON_INPUT("dataInput", self.input_handler) self.KARABO_ON_EOS("dataInput", self.handle_eos) @@ -804,7 +811,7 @@ class BaseCorrection(PythonDevice): # update device based on changes if config.has("frameFilter"): self._frame_filter = _parse_frame_filter( - utils.ChainHash(config, self._parameters) + misc.ChainHash(config, self._parameters) ) self._prereconfigure_update_hash = config @@ -1216,7 +1223,7 @@ class BaseCorrection(PythonDevice): if my_train_id != 0 else self._train_ratio_tracker.get(), ) - except utils.NonMonotonicTrainIdWarning as ex: + except trackers.NonMonotonicTrainIdWarning as ex: warn( f"Train ratio tracker noticed issue with train ID: {ex}\n" f"For the record, I think now is: {my_train_id}" @@ -1309,9 +1316,6 @@ class BaseCorrection(PythonDevice): self.signalEndOfStream("dataOutput") -utils.add_unsafe_get(BaseCorrection) - - def add_correction_step_schema(schema, field_flag_constants_mapping): """Using the fields in the provided mapping, will add nodes to schema diff --git a/src/calng/base_geometry.py b/src/calng/base_geometry.py deleted file mode 100644 index 01ae9b2c9064bd0b9e0d3e18ea0c368c8a555d9f..0000000000000000000000000000000000000000 --- a/src/calng/base_geometry.py +++ /dev/null @@ -1,699 +0,0 @@ -import contextlib -import functools -import logging - -import matplotlib.pyplot as plt -import numpy as np -from karabo.middlelayer import ( - AccessLevel, - AccessMode, - Assignment, - Bool, - Configurable, - DaqPolicy, - Device, - Double, - EncodingType, - Hash, - Image, - ImageData, - Int32, - Node, - Signal, - Slot, - State, - String, - Unit, - VectorHash, - VectorString, - sleep, - slot, -) -from matplotlib.backends.backend_agg import FigureCanvasAgg - -from ._version import version as deviceVersion -from . 
import geom_utils, scenes - - -def make_x_y_coordinate_node( - default_x, default_y, x_args=None, y_args=None, node_args=None -): - class XYCoordinate(Configurable): - x = Double( - defaultValue=default_x, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - **({} if x_args is None else x_args), - ) - y = Double( - defaultValue=default_y, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - **({} if y_args is None else y_args), - ) - - return Node(XYCoordinate, **({} if node_args is None else node_args)) - - -def make_x_y_z_coordinate_node( - default_x, - default_y, - default_z, - x_args=None, - y_args=None, - z_args=None, - node_args=None, -): - class XYZCoordinate(Configurable): - x = Double( - defaultValue=default_x, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - **({} if x_args is None else x_args), - ) - y = Double( - defaultValue=default_y, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - **({} if y_args is None else y_args), - ) - z = Double( - defaultValue=default_z, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - **({} if z_args is None else z_args), - ) - - return Node(XYZCoordinate, **({} if node_args is None else node_args)) - - -make_x_y_offset_node = functools.partial( - make_x_y_coordinate_node, - 0, - 0, - x_args={"unitSymbol": Unit.METER}, - y_args={"unitSymbol": Unit.METER}, - node_args={ - "displayedName": "Offset", - "description": "See EXtra-geom documentation for details. This offset is " - "applied to entire detector after initial geometry is created from manual " - "parameters. Example: To move entire geometry up by 2 mm relative to beam, " - "set offset.y to 2e-3.", - }, -) - - -def make_x_y_z_relative_offset_node(title): - return make_x_y_z_coordinate_node( - 0, - 0, - 0, - x_args={"unitSymbol": Unit.METER}, - y_args={"unitSymbol": Unit.METER}, - z_args={"unitSymbol": Unit.METER}, - node_args={ - "displayedName": title, - "description": "See EXtra-geom documentation for details. 
This is used to " - "set the relative offset for the two halves of a pnCCD", - }, - ) - - -# TODO: consider other history models (could be fun) -class TweakGeometryNode(Configurable): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._undo_stack = [] - self._redo_stack = [] - self._device = self.get_root() - - def _reset(self): - # clear history when new geometry is set from manual / file - self._undo_stack.clear() - self._redo_stack.clear() - self.undoLength = 0 - self.redoLength = 0 - - @Slot(displayedName="Undo") - async def undo(self): - assert len(self._undo_stack) > 0 - self._redo_stack.append(self._device.geometry) - await self._device._set_geometry(self._undo_stack.pop()) - self.undoLength = len(self._undo_stack) - self.redoLength = len(self._redo_stack) - - @Slot(displayedName="Redo") - async def redo(self): - assert len(self._redo_stack) > 0 - self._undo_stack.append(self._device.geometry) - await self._device._set_geometry(self._redo_stack.pop()) - self.undoLength = len(self._undo_stack) - self.redoLength = len(self._redo_stack) - - offset = make_x_y_offset_node() - - @Slot(displayedName="Add offset") - async def add(self): - current_geometry = self._device.geometry - new_geometry = current_geometry.offset( - (self.offset.x.value, self.offset.y.value) - ) - self._undo_stack.append(current_geometry) - self.undoLength = len(self._undo_stack) - await self._device._set_geometry(new_geometry) - if self._redo_stack: - self._redo_stack.clear() - self.redoLength = 0 - - undoLength = Int32( - displayedName="Undo length", - defaultValue=0, - accessMode=AccessMode.READONLY, - ) - - redoLength = Int32( - displayedName="Redo length", - defaultValue=0, - accessMode=AccessMode.READONLY, - ) - - -class BaseManualGeometryConfigNode(Configurable): - offset = make_x_y_offset_node() - - @Slot( - displayedName="Set manual geometry", - allowedStates=[State.ACTIVE], - ) - async def setManual(self): - await self.get_root()._set_from_manual_config() - - -class GeometryFileNode(Configurable): - filePath = String( - defaultValue="", - displayedName="File path", - description="Full path (including filename and suffix) to the desired geometry " - "file. Keep in mind that the default directory is $KARABO/var/data on device " - "server node, so it's probably wise to give absolute path.", - assignment=Assignment.OPTIONAL, - accessMode=AccessMode.RECONFIGURABLE, - ) - fileType = String( - displayedName="File type", - defaultValue="crystfel", - description="What kind of file will be loaded. Corresponds to options within " - "EXtra-geom. Note that the options listed here may not all apply to all " - "geometries. I think 'crystfel' (uses extra_geom.[geometry type]" - ".from_crystfel_geom is the most 'universal', so I left that as default.", - options=["crystfel", "h5", "h5+quadrants"], - assignment=Assignment.OPTIONAL, - accessMode=AccessMode.RECONFIGURABLE, - ) - offset = make_x_y_offset_node() - updateManualOnLoad = Bool( - defaultValue=True, - displayedName="Update manual settings", - description="If this flag is on, the manual settings on this device will be " - "updated according to the loaded geometry file. This is useful when you want " - "to load a file and then tweak the geometry a bit. 
This will zero current " - "offset.", - assignment=Assignment.OPTIONAL, - accessMode=AccessMode.RECONFIGURABLE, - ) - - @Slot( - displayedName="Load from file", - allowedStates=[State.ACTIVE], - ) - async def loadFromFile(self): - await self.get_root()._load_from_file() - - -class ManualGeometryBase(Device): - __version__ = deviceVersion - geometry_class = None # subclass must set - # subclass must add slot setManual - - availableScenes = VectorString( - displayedName="Available scenes", - displayType="Scenes", - requiredAccessLevel=AccessLevel.OBSERVER, - accessMode=AccessMode.READONLY, - defaultValue=[ - "overview", - ], - daqPolicy=DaqPolicy.OMIT, - ) - - signalNewGeometry = Signal(String(), String()) - - geometryPreview = Image( - ImageData(np.empty(shape=(0, 0, 0), dtype=np.uint32)), - displayedName="Geometry preview", - encoding=EncodingType.RGBA, - daqPolicy=DaqPolicy.OMIT, - ) - - geometryFile = Node( - GeometryFileNode, - displayedName="Geometry file", - description="Allows loading geometry from CrystFEL geometry file", - ) - - tweakGeometry = Node( - TweakGeometryNode, - displayedName="Tweak geometry", - ) - - @Slot( - displayedName="Send geometry", - allowedStates=[State.ACTIVE], - description="Will send 'signalNewGeometry' to connected slots. These will for " - "example be DetectorAssembler. Note that signal is sent automatically when new " - "geometry is set - this slot is mostly to be called by manager after " - "(re)starting assemblers while geometry device is still up.", - ) - async def sendGeometry(self): - self.signalNewGeometry( - self.deviceId, geom_utils.serialize_geometry(self.geometry) - ) - - @Slot( - displayedName="Update preview", - allowedStates=[State.ACTIVE], - ) - async def updatePreview(self): - axis = self.geometry.inspect() - axis.figure.tight_layout(pad=0) - axis.figure.set_facecolor("none") - # axis.figure.set_size_inches(6, 6) - # axis.figure.set_dpi(300) - canvas = FigureCanvasAgg(axis.figure) - canvas.draw() - image_buffer = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8).reshape( - canvas.get_width_height()[::-1] + (4,) - ) - self.geometryPreview = ImageData( - image_buffer, encoding=EncodingType.RGBA, bitsPerPixel=3 * 8 - ) - self._set_status("Preview updated") - - async def _load_from_file(self): - with self.push_state(State.CHANGING): - geometry = None - - file_path = self.geometryFile.filePath.value - self._set_status(f"Loading geometry from {file_path}...") - file_type = self.geometryFile.fileType.value - - try: - if file_type == "crystfel": - geometry = self.geometry_class.from_crystfel_geom(file_path) - elif file_type == "h5": - geometry = self.geometry_class.from_h5_file(file_path) - elif file_type == "h5+quadrants": - geometry = self.geometry_class.from_h5_file_and_quad_positions( - file_path, - [(Q.x.value, Q.y.value) for Q in self._quadrant_corners], - ) - else: - raise ValueError(f"Invalid file type {file_type}") - except FileNotFoundError: - self._set_status("Geometry file not found", level=logging.WARN) - except RuntimeError as e: - self._set_status( - f"Failed to load geometry file: {e}", level=logging.WARN - ) - except Exception as e: - self._set_status( - f"Misc. 
exception when loading geometry file: {e}", - level=logging.WARN, - ) - else: - geometry = geometry.offset( - (self.geometryFile.offset.x.value, self.geometryFile.offset.y.value) - ) - await self._set_geometry(geometry) - self._set_status("Successfully loaded geometry from file") - if self.geometryFile.updateManualOnLoad.value: - self._set_status( - "Updating manual settings on device to reflect loaded geometry" - ) - self._update_manual_from_current() - self.tweakGeometry._reset() - return True - - return False - - def _update_manual_from_current(self): - # subclass should implement (neat when loading from CrystFEL geom) - raise NotImplementedError() - - async def _set_geometry(self, geometry, update_preview=True): - self.geometry = geometry - await self.sendGeometry() - if update_preview: - await self.updatePreview() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - plt.switch_backend("agg") # plotting backend which works for preview hack - - async def onInitialization(self): - self.state = State.INIT - self.log.INFO("Waiting a second to let slots connect to signal") - await sleep(1) - if self.geometryFile.filePath.value and await self._load_from_file(): - ... - else: - await self._set_from_manual_config() - self.state = State.ACTIVE - - @contextlib.contextmanager - def push_state(self, state): - previous_state = self.state - self.state = state - try: - yield - finally: - self.state = previous_state - - def _set_status(self, text, level=logging.INFO): - """Add and log a status message. - - Suppresses throttling from the gui server. - """ - - self.status = text - self.logger.log(level, text) - - -def make_origin_node(default_x, default_y): - class OriginNode(BaseManualGeometryConfigNode): - x = Double( - defaultValue=default_x, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - ) - y = Double( - defaultValue=default_y, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - ) - - return Node(OriginNode) - - -class ManualOriginGeometryBase(ManualGeometryBase): - origin = None # subclass must define - - async def _set_from_manual_config(self): - self._set_status("Updating geometry from manual configuration") - geometry = self.geometry_class.from_origin( - (self.origin.x.value, self.origin.y.value) - ) - await self._set_geometry(geometry) - # TODO: allow offset - - @slot - def requestScene(self, params): - return Hash( - "type", - "deviceScene", - "origin", - self.deviceId, - "payload", - Hash( - "success", - True, - "name", - "overview", - "data", - scenes.origin_geometry_overview( - self.deviceId, - self.getDeviceSchema(), - ), - ), - ) - - async def _update_manual_from_current(self): - raise NotImplementedError() - - -class RelativePositionNode(BaseManualGeometryConfigNode): - gap = Double( - defaultValue=0.004, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - unitSymbol=Unit.METER, - displayedName="Gap", - description="Gap between modules. 4mm (~50) by default.", - ) - topOffset = make_x_y_z_relative_offset_node("Top offset") - bottomOffset = make_x_y_z_relative_offset_node("Bottom offset") - - -class ManualRelativePositionsGeometryBase(ManualGeometryBase): - """Only used for pnCCD for now: specify geometry primarily via the gap between the - two detector modules. Optionally tweak by offsetting top and / or bottom half. 
If - this is useful for other detectors at some point, should refactor to set individual - defaults.""" - - manualSetting = Node(RelativePositionNode) - - async def _set_from_manual_config(self): - self._set_status("Updating geometry from manual configuration") - geometry = self.geometry_class.from_relative_positions( - gap=self.manualSetting.gap.value, - top_offset=( - self.manualSetting.topOffset.x.value, - self.manualSetting.topOffset.y.value, - self.manualSetting.topOffset.z.value, - ), - bottom_offset=( - self.manualSetting.bottomOffset.x.value, - self.manualSetting.bottomOffset.y.value, - self.manualSetting.bottomOffset.z.value, - ), - ) - await self._set_geometry(geometry) - - @slot - def requestScene(self, params): - return Hash( - "type", - "deviceScene", - "origin", - self.deviceId, - "payload", - Hash( - "success", - True, - "name", - "overview", - "data", - scenes.relative_geometry_overview( - self.deviceId, - self.getDeviceSchema(), - ), - ), - ) - - async def _update_manual_from_current(self): - raise NotImplementedError() - - -def make_quadrant_corners_node(default_values): - assert len(default_values) == 4 - assert all(len(x) == 2 for x in default_values) - - class QuadrantCornersNode(BaseManualGeometryConfigNode): - Q1 = make_x_y_coordinate_node(*default_values[0]) - Q2 = make_x_y_coordinate_node(*default_values[1]) - Q3 = make_x_y_coordinate_node(*default_values[2]) - Q4 = make_x_y_coordinate_node(*default_values[3]) - - return Node(QuadrantCornersNode) - - -class ManualQuadrantsGeometryBase(ManualGeometryBase): - quadrantCorners = None # subclass must define (with nice defaults) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._quadrant_corners = [ - self.quadrantCorners.Q1, - self.quadrantCorners.Q2, - self.quadrantCorners.Q3, - self.quadrantCorners.Q4, - ] - - @slot - def requestScene(self, params): - return Hash( - "type", - "deviceScene", - "origin", - self.deviceId, - "payload", - Hash( - "success", - True, - "name", - "overview", - "data", - scenes.quadrant_geometry_overview( - self.deviceId, - self.getDeviceSchema(), - ), - ), - ) - - async def _set_from_manual_config(self): - self._set_status("Updating geometry from manual configuration") - with self.push_state(State.CHANGING): - geometry = self.geometry_class.from_quad_positions( - [(Q.x.value, Q.y.value) for Q in self._quadrant_corners] - ).offset( - ( - self.quadrantCorners.offset.x.value, - self.quadrantCorners.offset.y.value, - ) - ) - await self._set_geometry(geometry) - self.tweakGeometry._reset() - - def _update_manual_from_current(self): - # TODO: consider what to do about offset - for corner, (x, y) in zip( - self._quadrant_corners, self.geometry.quad_positions() - ): - corner.x = x - corner.y = y - self.quadrantCorners.offset.x = 0 - self.quadrantCorners.offset.y = 0 - - -class OrientableModuleListItem(Configurable): - posX = Double( - assignment=Assignment.OPTIONAL, - defaultValue=0, - ) - posY = Double( - assignment=Assignment.OPTIONAL, - defaultValue=0, - ) - orientX = Int32(assignment=Assignment.OPTIONAL, defaultValue=1) - orientY = Int32(assignment=Assignment.OPTIONAL, defaultValue=1) - - -class RotatableModuleListItem(Configurable): - posX = Double( - assignment=Assignment.OPTIONAL, - defaultValue=0, - ) - posY = Double( - assignment=Assignment.OPTIONAL, - defaultValue=0, - ) - rotate = Int32(assignment=Assignment.OPTIONAL, defaultValue=1) - - -def make_manual_orientable_module_list_node(defaults): - class 
ManualOrientableModuleListNode(BaseManualGeometryConfigNode): - modules = VectorHash( - displayedName="Modules", - rows=OrientableModuleListItem, - defaultValue=defaults, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - ) - - return Node(ManualOrientableModuleListNode) - - -class ManualOrientableModuleListGeometryBase(ManualGeometryBase): - moduleList = None # subclass must define (with nice defaults) - - @slot - def requestScene(self, params): - return Hash( - "type", - "deviceScene", - "origin", - self.deviceId, - "payload", - Hash( - "success", - True, - "name", - "overview", - "data", - scenes.modules_geometry_overview( - self.deviceId, - self.getDeviceSchema(), - ), - ), - ) - - async def _set_from_manual_config(self): - self._set_status("Updating geometry from manual configuration") - with self.push_state(State.CHANGING): - geometry = self.geometry_class.from_module_positions( - [(x, y) for (x, y, _, _) in self.moduleList.modules.value], - [ - (orient_x, orient_y) - for (_, _, orient_x, orient_y) in self.moduleList.modules.value - ], - ).offset((self.moduleList.offset.x.value, self.moduleList.offset.y.value)) - await self._set_geometry(geometry) - self.tweakGeometry._reset() - - -def make_manual_rotatable_module_list_node(defaults): - class ManualRotatableModuleListNode(BaseManualGeometryConfigNode): - modules = VectorHash( - displayedName="Modules", - rows=RotatableModuleListItem, - defaultValue=defaults, - accessMode=AccessMode.RECONFIGURABLE, - assignment=Assignment.OPTIONAL, - ) - - return Node(ManualRotatableModuleListNode) - - -class ManualRotatableModuleListGeometryBase(ManualGeometryBase): - moduleList = None # subclass must define (with nice defaults) - - @slot - def requestScene(self, params): - return Hash( - "type", - "deviceScene", - "origin", - self.deviceId, - "payload", - Hash( - "success", - True, - "name", - "overview", - "data", - scenes.modules_geometry_overview( - self.deviceId, - self.getDeviceSchema(), - ), - ), - ) - - async def _set_from_manual_config(self): - self._set_status("Updating geometry from manual configuration") - with self.push_state(State.CHANGING): - geometry = self.geometry_class.from_module_positions( - [(x, y) for (x, y, _) in self.moduleList.modules.value], - [rotation for (_, _, rotation) in self.moduleList.modules.value], - ).offset((self.moduleList.offset.x.value, self.moduleList.offset.y.value)) - await self._set_geometry(geometry) - self.tweakGeometry._reset() diff --git a/src/calng/base_kernel_runner.py b/src/calng/base_kernel_runner.py index e44c65d14be2f581a0f8df0f2299efd95df28a93..5eaaf10155d23e50b73044da62cad62a335f97f6 100644 --- a/src/calng/base_kernel_runner.py +++ b/src/calng/base_kernel_runner.py @@ -3,11 +3,10 @@ import functools import operator import pathlib +from calngUtils import misc as utils import jinja2 import numpy as np -from . 
import utils - class BaseKernelRunner: _gpu_based = True diff --git a/src/calng/geom_utils.py b/src/calng/geom_utils.py deleted file mode 100644 index de8181380195a8df78e02d3696d17f4c131943bb..0000000000000000000000000000000000000000 --- a/src/calng/geom_utils.py +++ /dev/null @@ -1,11 +0,0 @@ -import base64 -import gzip -import pickle - - -def serialize_geometry(geom: "extra_geom.base.DetectorGeometryBase") -> str: - return base64.b64encode(gzip.compress(pickle.dumps(geom))).decode(encoding="ASCII") - - -def deserialize_geometry(serialized: str) -> "extra_geom.base.DetectorGeometryBase": - return pickle.loads(gzip.decompress(base64.b64decode(serialized.encode()))) diff --git a/src/calng/geometries/Agipd1MGeometry.py b/src/calng/geometries/Agipd1MGeometry.py deleted file mode 100644 index dd800ad7195dec64388906a6c3e61de7e1b8a821..0000000000000000000000000000000000000000 --- a/src/calng/geometries/Agipd1MGeometry.py +++ /dev/null @@ -1,15 +0,0 @@ -import extra_geom - -from ..base_geometry import ManualQuadrantsGeometryBase, make_quadrant_corners_node - - -class Agipd1MGeometry(ManualQuadrantsGeometryBase): - geometry_class = extra_geom.AGIPD_1MGeometry - quadrantCorners = make_quadrant_corners_node( - [ - (-525, 625), - (-550, -10), - (520, -160), - (542.5, 475), - ] - ) diff --git a/src/calng/geometries/Agipd500KGeometry.py b/src/calng/geometries/Agipd500KGeometry.py deleted file mode 100644 index a2e9580050dc4e80d97d892018561fd9aa3d4242..0000000000000000000000000000000000000000 --- a/src/calng/geometries/Agipd500KGeometry.py +++ /dev/null @@ -1,8 +0,0 @@ -import extra_geom - -from ..base_geometry import ManualOriginGeometryBase, make_origin_node - - -class Agipd500KGeometry(ManualOriginGeometryBase): - geometry_class = extra_geom.AGIPD_500K2GGeometry - origin = make_origin_node(0, 0) diff --git a/src/calng/geometries/Dssc1MGeometry.py b/src/calng/geometries/Dssc1MGeometry.py deleted file mode 100644 index 55548120ac07a68d54c88c5811d7eb2c22f6d116..0000000000000000000000000000000000000000 --- a/src/calng/geometries/Dssc1MGeometry.py +++ /dev/null @@ -1,15 +0,0 @@ -import extra_geom - -from ..base_geometry import ManualQuadrantsGeometryBase, make_quadrant_corners_node - - -class Dssc1MGeometry(ManualQuadrantsGeometryBase): - geometry_class = extra_geom.DSSC_1MGeometry - quadrantCorners = make_quadrant_corners_node( - [ - (-130, 5), - (-130, -125), - (5, -125), - (5, 5), - ] - ) diff --git a/src/calng/geometries/Epix100Geometry.py b/src/calng/geometries/Epix100Geometry.py deleted file mode 100644 index 20729ea3cd60a1009041b0cf831b36997126668f..0000000000000000000000000000000000000000 --- a/src/calng/geometries/Epix100Geometry.py +++ /dev/null @@ -1,8 +0,0 @@ -import extra_geom - -from ..base_geometry import ManualOriginGeometryBase, make_origin_node - - -class Epix100Geometry(ManualOriginGeometryBase): - geometry_class = extra_geom.Epix100Geometry - origin = make_origin_node(0, 0) diff --git a/src/calng/geometries/JungfrauGeometry.py b/src/calng/geometries/JungfrauGeometry.py deleted file mode 100644 index e0c0703cb394a9b3f51fa2f163a62cc119c6a15a..0000000000000000000000000000000000000000 --- a/src/calng/geometries/JungfrauGeometry.py +++ /dev/null @@ -1,65 +0,0 @@ -import extra_geom -import numpy as np -from karabo.middlelayer import Hash - -from ..base_geometry import ( - ManualOrientableModuleListGeometryBase, - make_manual_orientable_module_list_node, -) - - -class JungfrauGeometry(ManualOrientableModuleListGeometryBase): - geometry_class = extra_geom.JUNGFRAUGeometry - moduleList = 
make_manual_orientable_module_list_node( - [ - Hash("posX", x, "posY", y, "orientX", ox, "orientY", oy) - for (x, y, ox, oy) in [ - (95, 564, -1, -1), - (95, 17, -1, -1), - (95, -530, -1, -1), - (95, -1077, -1, -1), - (-1125, -1078, 1, 1), - (-1125, -531, 1, 1), - (-1125, 16, 1, 1), - (-1125, 563, 1, 1), - ] - ] - ) - - def _update_manual_from_current(self): - self.moduleList.modules = [ - self._guess_offset_and_orientation(module) - for module in self.geometry.modules - ] - - def _guess_offset_and_orientation(self, module, asic_gap=2): - # note: if this generalizes, make classmethod of ManualModuleListGeometryBase - # Working backwards from from_module_positions from detectors.py in extra_geom - corners = np.asarray([tile.corner_pos for tile in module]) - row_1 = corners[0:4] - row_2 = corners[4:8] - horizontal_diff = np.diff([row_1[:, 0], row_2[:, 0]]) - vertical_diff = np.diff([row_1[:, 1], row_2[:, 1]], axis=0) - - if np.all(horizontal_diff > 0): - x_orientation = 1 - else: - # TODO: maybe warn if also not all < 0 - x_orientation = -1 - - if np.all(vertical_diff > 0): - y_orientation = 1 - else: - y_orientation = -1 - - corner = module[0].corner_pos - module_width = (4 * self.geometry_class.frag_fs_pixels) + (3 * asic_gap) - module_height = (2 * self.geometry_class.frag_ss_pixels) + asic_gap - corner_x = corner[0] / self.geometry_class.pixel_size - ( - module_width if x_orientation == -1 else 0 - ) - corner_y = corner[1] / self.geometry_class.pixel_size - ( - module_height if y_orientation == -1 else 0 - ) - - return (corner_x, corner_y, x_orientation, y_orientation) diff --git a/src/calng/geometries/Lpd1MGeometry.py b/src/calng/geometries/Lpd1MGeometry.py deleted file mode 100644 index 39a5d043d800bed87c67cb8a1c38d4a1fa014606..0000000000000000000000000000000000000000 --- a/src/calng/geometries/Lpd1MGeometry.py +++ /dev/null @@ -1,15 +0,0 @@ -import extra_geom - -from ..base_geometry import ManualQuadrantsGeometryBase, make_quadrant_corners_node - - -class Lpd1MGeometry(ManualQuadrantsGeometryBase): - geometry_class = extra_geom.LPD_1MGeometry - quadrantCorners = make_quadrant_corners_node( - [ - (11.4, 299), - (-11.5, 8), - (254.5, -16), - (278.5, 275), - ] - ) diff --git a/src/calng/geometries/LpdminiGeometry.py b/src/calng/geometries/LpdminiGeometry.py deleted file mode 100644 index 31846b953ccc949564686e221b631bf0bd8c866e..0000000000000000000000000000000000000000 --- a/src/calng/geometries/LpdminiGeometry.py +++ /dev/null @@ -1,24 +0,0 @@ -import extra_geom -from karabo.middlelayer import Hash - -from ..base_geometry import ( - ManualRotatableModuleListGeometryBase, - make_manual_rotatable_module_list_node, -) - - -class LpdminiGeometry(ManualRotatableModuleListGeometryBase): - geometry_class = extra_geom.LPD_MiniGeometry - - moduleList = make_manual_rotatable_module_list_node( - [ - Hash("posX", x, "posY", y, "rotate", r) - for (x, y, r) in [ - (0, 0, 0), - # TODO: appropriate defaults - ] - ] - ) - - def _update_manual_from_current(self): - raise NotImplementedError() diff --git a/src/calng/geometries/PnccdGeometry.py b/src/calng/geometries/PnccdGeometry.py deleted file mode 100644 index 04c438dd3950782baae2a2b894c1a7dacf5b3525..0000000000000000000000000000000000000000 --- a/src/calng/geometries/PnccdGeometry.py +++ /dev/null @@ -1,23 +0,0 @@ -import extra_geom - -from ..base_geometry import ManualRelativePositionsGeometryBase - - -class FixedUpPnccdGeometry(extra_geom.PNCCDGeometry): - # DetectorAssembler neeeds expected_data_shape to match actual data shape - 
expected_data_shape = (1, 1024, 1024) - - # and these two methods need tweaking to avoid that breaking assertions - def _ensure_shape(self, data): - return data.reshape(self.expected_data_shape) - - def split_tiles(self, module_data): - return [module_data[:512], module_data[512:]] - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.modules = [[self.modules[0][0], self.modules[1][0]]] - - -class PnccdGeometry(ManualRelativePositionsGeometryBase): - geometry_class = FixedUpPnccdGeometry diff --git a/src/calng/geometries/__init__.py b/src/calng/geometries/__init__.py deleted file mode 100644 index a2864497a2d9ee83b1f5b64536b2f89f9d8b814c..0000000000000000000000000000000000000000 --- a/src/calng/geometries/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# flake8: noqa: F401 -from . import ( - Agipd1MGeometry, - Dssc1MGeometry, - Epix100Geometry, - Lpd1MGeometry, - LpdminiGeometry, - JungfrauGeometry, - Lpd1MGeometry, - PnccdGeometry, -) diff --git a/src/calng/scenes.py b/src/calng/scenes.py index 417e72e04da6cc47e2be2293e86b73665bd52dab..8c272ac476933c754144f5ce2e29aafb6db1e619 100644 --- a/src/calng/scenes.py +++ b/src/calng/scenes.py @@ -1,436 +1,38 @@ -import enum - -import karabo.native -import karathon +import natsort +from calngUtils.scene_utils import ( + BASE_INC, + NARROW_INC, + DisplayRoundedFloat, + DisplayRow, + EditableRow, + HorizontalLayout, + Space, + VerticalLayout, + Vline, + boxed, + recursive_editable, + scene_generator, + schema_to_hash, + titled, +) from karabo.common.scenemodel.api import ( - CheckBoxModel, - ComboBoxModel, - DeviceSceneLinkModel, DetectorGraphModel, + DeviceSceneLinkModel, DisplayCommandModel, DisplayLabelModel, - DisplayListModel, DisplayStateColorModel, DisplayTextLogModel, - DoubleLineEditModel, - EvaluatorModel, - IntLineEditModel, LabelModel, LampModel, LineEditModel, - LineModel, NDArrayGraphModel, - RectangleModel, - SceneModel, SceneTargetWindow, TableElementModel, TrendGraphModel, UnknownWidgetDataModel, VectorXYGraphModel, - WebCamGraphModel, WebLinkModel, - write_scene, ) -import natsort - - -# section: common setup - - -BASE_INC = 25 -NARROW_INC = 20 -PADDING = 5 - - -def DisplayRoundedFloat(*args, decimals=2, **kwargs): - # note: naive subclass breaks as registry looks for writer based on exact class - return EvaluatorModel(*args, expression=f"f'{{x:.{decimals}f}}'", **kwargs) - - -_type_to_display_model = { - "BOOL": CheckBoxModel, - "DOUBLE": DisplayRoundedFloat, - "FLOAT": DisplayRoundedFloat, - "STRING": DisplayLabelModel, - "UINT32": DisplayLabelModel, - "VECTOR_UINT32": DisplayListModel, -} -_type_to_line_editable = { - "BOOL": (CheckBoxModel, {"klass": "EditableCheckBox"}), - "DOUBLE": (DoubleLineEditModel, {}), - "FLOAT": (DoubleLineEditModel, {}), - "INT32": (IntLineEditModel, {}), - "UINT32": (IntLineEditModel, {}), - "INT64": (IntLineEditModel, {}), - "UINT64": (IntLineEditModel, {}), - "STRING": (LineEditModel, {"klass": "EditableLineEdit"}), -} - - -def safe_render(obj, x, y): - if hasattr(obj, "render"): - return obj.render(x, y) - else: - obj.x = x - obj.y = y - return [obj] - - -class Align(enum.Enum): - CENTER = enum.auto() - TOP = enum.auto() - BOTTOM = enum.auto() - LEFT = enum.auto() - RIGHT = enum.auto() - - -# section: nice component decorators - - -def titled(title, width=8 * NARROW_INC): - def actual_decorator(component_class): - class new_class(component_class): - def render(self, x, y, *args, **kwargs): - return [ - LabelModel( - frame_width=1, - text=title, - width=width, - 
height=NARROW_INC, - x=x, - y=y, - ) - ] + component_class.render(self, x, y + NARROW_INC, *args, **kwargs) - - @property - def width(self): - return max(component_class.width.fget(self), width) - - @property - def height(self): - return component_class.height.fget(self) + NARROW_INC - - return new_class - - return actual_decorator - - -def boxed(component_class): - class new_class(component_class): - def render(self, x, y, *args, **kwargs): - return [ - RectangleModel( - x=x, - y=y, - width=component_class.width.fget(self) + 2 * PADDING, - height=component_class.height.fget(self) + 2 * PADDING, - stroke="#000000", - ) - ] + component_class.render(self, x + PADDING, y + PADDING, *args, **kwargs) - - @property - def width(self): - return component_class.width.fget(self) + 2 * PADDING - - @property - def height(self): - return component_class.height.fget(self) + 2 * PADDING - - return new_class - - -# section: useful layout and utility classes - - -class Space: - def __init__(self, width, height): - self.width = width - self.height = height - - def render(self, x, y): - return [] - - -class Hline: - def __init__(self, width): - self.width = width - self.height = 0 - - def render(self, x, y): - return [ - LineModel( - stroke="#000000", - x1=x, - x2=x + self.width, - y1=y, - y2=y, - ) - ] - - -class Vline: - def __init__(self, height): - self.width = 0 - self.height = height - - def render(self, x, y): - return [ - LineModel( - stroke="#000000", - x1=x, - x2=x, - y1=y, - y2=y + self.height, - ) - ] - - -def dummy_wrap(model_class): - class Wrapper: - def __init__(self, *args, **kwargs): - self.thing = model_class(*args, **kwargs) - - def render(self, x, y): - self.thing.x = x - self.thing.y = y - return [self.thing] - - @property - def width(self): - return self.thing.width - - @property - def height(self): - return self.thing.height - - return Wrapper - - -class HorizontalLayout: - def __init__(self, *arg_children, children=None, padding=PADDING): - self.children = list(arg_children) - if children is not None: - self.children.extend(children) - self.padding = padding - - def render(self, x, y, align=Align.TOP): - if align is not Align.TOP: - height = self.height - res = [] - for child in self.children: - if align is Align.TOP: - y_ = y - elif align is Align.CENTER: - y_ = y + (height - child.height) / 2 - elif align is Align.BOTTOM: - y_ = y + (height - child.height) - else: - raise ValueError(f"Invalid align {align} for HorizontalLayout") - res.extend(safe_render(child, x, y_)) - x += child.width + self.padding - return res - - @property - def width(self): - if not self.children: - return 0 - return self.padding * (len(self.children) - 1) + sum( - c.width for c in self.children - ) - - @property - def height(self): - if not self.children: - return 0 - return max(c.height for c in self.children) - - -class VerticalLayout: - def __init__(self, *arg_children, children=None, padding=PADDING): - self.children = list(arg_children) - if children is not None: - self.children.extend(children) - self.padding = padding - - def render(self, x, y): - res = [] - for child in self.children: - res.extend(safe_render(child, x, y)) - y += child.height + self.padding - return res - - @property - def width(self): - if not self.children: - return 0 - return max(c.width for c in self.children) - - @property - def height(self): - if not self.children: - return 0 - return self.padding * (len(self.children) - 1) + sum( - c.height for c in self.children - ) - - -class DisplayAndEditableRow(HorizontalLayout): - def 
__init__( - self, - device_id, - schema_hash, - key_path, - label_width=7, - display_width=5, - edit_width=5, - height=None, - size_scale=BASE_INC, - ): - super().__init__(padding=0) - if height is None: - height = size_scale - key_attr = schema_hash.getAttributes(key_path) - label_text = ( - key_attr["displayedName"] - if "displayedName" in key_attr - else key_path.split(".")[-1] - ) - if "valueType" not in key_attr: - print(f"Key {key_path} on {device_id} had no valueType") - return - value_type = key_attr["valueType"] - - self.children.append( - LabelModel( - text=label_text, - width=label_width * size_scale, - height=height, - ) - ) - - if self.include_display(key_attr): - if value_type in _type_to_display_model: - model = _type_to_display_model[value_type] - else: - model = DisplayLabelModel - print(f"Scene generator would like to know more about {value_type}") - self.children.append( - model( - keys=[f"{device_id}.{key_path}"], - width=display_width * size_scale, - height=height, - ) - ) - - if self.include_editable(key_attr): - if "options" in key_attr: - self.children.append( - ComboBoxModel( - keys=[f"{device_id}.{key_path}"], - width=edit_width * size_scale, - height=height, - klass="EditableComboBox", - ) - ) - elif value_type in _type_to_line_editable: - line_editable_class, extra_args = _type_to_line_editable[value_type] - self.children.append( - line_editable_class( - keys=[f"{device_id}.{key_path}"], - width=edit_width * size_scale, - height=height, - **extra_args, - ) - ) - else: - self.children.append( - LabelModel( - text=f"Not implemented: editing {value_type} ({key_path})", - width=edit_width * size_scale, - height=height, - ) - ) - - def include_display(self, key_attr): - return True - - def include_editable(self, key_attr): - return True - - -class DisplayAndMaybeEditableRow(DisplayAndEditableRow): - def include_editable(self, key_attr): - return key_attr["accessMode"] == karabo.native.AccessMode.RECONFIGURABLE.value - - -class MaybeDisplayMaybeEditableRow(DisplayAndEditableRow): - # overriding init to unify display_width and edit_width - def __init__( - self, - device_id, - schema_hash, - key_path, - label_width=7, - display_or_edit_width=5, - height=None, - size_scale=BASE_INC, - ): - super().__init__( - device_id, - schema_hash, - key_path, - label_width=label_width, - display_width=display_or_edit_width, - edit_width=display_or_edit_width, - height=height, - size_scale=size_scale, - ) - - def include_display(self, key_attr): - return not self.include_editable(key_attr) - - def include_editable(self, key_attr): - return key_attr["accessMode"] == karabo.native.AccessMode.RECONFIGURABLE.value - - -class EditableRow(DisplayAndEditableRow): - # overriding init to get label_width, edit_width without label_width - def __init__( - self, - device_id, - schema_hash, - key_path, - label_width=7, - edit_width=5, - height=None, - size_scale=BASE_INC, - ): - super().__init__( - device_id, - schema_hash, - key_path, - label_width=label_width, - edit_width=edit_width, - height=height, - size_scale=size_scale, - ) - - def include_display(self, key_attr): - return False - - def include_editable(self, key_attr): - return True - - -class DisplayRow(DisplayAndEditableRow): - def include_display(self, key_attr): - return True - - def include_editable(self, key_attr): - return False - - -# section: specific handcrafted components for device classes @titled("Found constants", width=6 * NARROW_INC) @@ -799,294 +401,6 @@ class AssemblerDeviceStatus(VerticalLayout): ) -@titled("Manual 
geometry settings") -@boxed -class ManualQuadrantGeometrySettings(VerticalLayout): - def __init__(self, device_id): - super().__init__(padding=0) - self.children.append( - HorizontalLayout( - Space(width=3 * BASE_INC, height=BASE_INC), - LabelModel(text="x", width=4 * BASE_INC, height=BASE_INC), - LabelModel(text="y", width=4 * BASE_INC, height=BASE_INC), - ) - ) - self.children.extend( - [ - HorizontalLayout( - LabelModel(text=f"{thing}", width=3 * BASE_INC, height=BASE_INC), - DoubleLineEditModel( - keys=[f"{device_id}.quadrantCorners.{thing}.x"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.quadrantCorners.{thing}.y"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ) - for thing in ("Q1", "Q2", "Q3", "Q4", "offset") - ] - ) - self.children.append( - DisplayCommandModel( - keys=[f"{device_id}.quadrantCorners.setManual"], - width=6 * BASE_INC, - height=BASE_INC, - ), - ) - - -@titled("Manual geometry settings") -@boxed -class ManualOriginGeometrySettings(VerticalLayout): - def __init__(self, device_id): - super().__init__(padding=0) - self.children.append( - HorizontalLayout( - DoubleLineEditModel( - keys=[f"{device_id}.origin.x"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.origin.y"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ) - ) - self.children.append( - DisplayCommandModel( - keys=[f"{device_id}.origin.setManual"], - width=6 * BASE_INC, - height=BASE_INC, - ), - ) - - -@titled("Manual geometry settings") -@boxed -class RelativeGeometrySettings(VerticalLayout): - def __init__(self, device_id): - super().__init__(padding=0) - self.children.extend( - [ - HorizontalLayout( - LabelModel(text="Gap", width=4 * BASE_INC, height=BASE_INC), - DoubleLineEditModel( - keys=[f"{device_id}.manualSetting.gap"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ), - HorizontalLayout( - LabelModel( - text="Module offsets", width=6 * BASE_INC, height=BASE_INC - ), - LabelModel(text="x", width=4 * BASE_INC, height=BASE_INC), - LabelModel(text="y", width=4 * BASE_INC, height=BASE_INC), - LabelModel(text="z", width=4 * BASE_INC, height=BASE_INC), - ), - HorizontalLayout( - LabelModel(text="Top", width=6 * BASE_INC, height=BASE_INC), - DoubleLineEditModel( - keys=[f"{device_id}.manualSetting.topOffset.x"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.manualSetting.topOffset.y"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.manualSetting.topOffset.z"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ), - HorizontalLayout( - LabelModel(text="Bottom", width=6 * BASE_INC, height=BASE_INC), - DoubleLineEditModel( - keys=[f"{device_id}.manualSetting.bottomOffset.x"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.manualSetting.bottomOffset.y"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.manualSetting.bottomOffset.z"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ), - DisplayCommandModel( - keys=[f"{device_id}.manualSetting.setManual"], - width=6 * BASE_INC, - height=BASE_INC, - ), - ] - ) - - -@titled("Manual geometry settings") -@boxed -class ManualModulesGeometrySettings(VerticalLayout): - def __init__(self, device_id): - super().__init__(padding=0) - self.children.append( - TableElementModel( - keys=[f"{device_id}.moduleList.modules"], - klass="EditableTableElement", - width=14 * BASE_INC, - height=10 * BASE_INC, - ) - ) - 
self.children.append( - HorizontalLayout( - LabelModel(text="Offset", width=3 * BASE_INC, height=BASE_INC), - DoubleLineEditModel( - keys=[f"{device_id}.moduleList.offset.x"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.moduleList.offset.y"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ) - ) - self.children.append( - DisplayCommandModel( - keys=[f"{device_id}.moduleList.setManual"], - width=6 * BASE_INC, - height=BASE_INC, - ), - ) - - -@titled("Tweak current geometry") -@boxed -class TweakCurrentGeometry(VerticalLayout): - def __init__(self, device_id): - super().__init__(padding=0) - self.children.append( - HorizontalLayout( - LabelModel(text="Offset", width=3 * BASE_INC, height=BASE_INC), - DoubleLineEditModel( - keys=[f"{device_id}.tweakGeometry.offset.x"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.tweakGeometry.offset.y"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ), - ) - self.children.append( - DisplayCommandModel( - keys=[f"{device_id}.tweakGeometry.add"], - width=6 * BASE_INC, - height=BASE_INC, - ), - ) - self.children.append( - HorizontalLayout( - DisplayLabelModel( - keys=[f"{device_id}.tweakGeometry.undoLength"], - width=2 * BASE_INC, - height=BASE_INC, - font_size=9, - ), - DisplayCommandModel( - keys=[f"{device_id}.tweakGeometry.undo"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DisplayLabelModel( - keys=[f"{device_id}.tweakGeometry.redoLength"], - width=2 * BASE_INC, - height=BASE_INC, - font_size=9, - ), - DisplayCommandModel( - keys=[f"{device_id}.tweakGeometry.redo"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ), - ) - - -@titled("Geometry preview") -@boxed -class GeometryPreview(VerticalLayout): - def __init__(self, device_id): - super().__init__(padding=0) - self.children.append( - DisplayCommandModel( - keys=[f"{device_id}.updatePreview"], - width=6 * BASE_INC, - height=BASE_INC, - ), - ) - self.children.append( - WebCamGraphModel( - keys=[f"{device_id}.geometryPreview"], - width=30 * BASE_INC, - height=30 * BASE_INC, - x=PADDING, - y=PADDING, - ) - ) - - -@titled("Geometry from file") -@boxed -class GeometryFromFileSettings(VerticalLayout): - def __init__(self, device_id, schema_hash): - super().__init__(padding=0) - self.children.extend( - [ - EditableRow(device_id, schema_hash, "geometryFile.filePath", 4, 8), - EditableRow(device_id, schema_hash, "geometryFile.fileType", 4, 8), - ] - ) - self.children.append( - HorizontalLayout( - LabelModel(text="Offset", width=3 * BASE_INC, height=BASE_INC), - DoubleLineEditModel( - keys=[f"{device_id}.geometryFile.offset.x"], - width=4 * BASE_INC, - height=BASE_INC, - ), - DoubleLineEditModel( - keys=[f"{device_id}.geometryFile.offset.y"], - width=4 * BASE_INC, - height=BASE_INC, - ), - ) - ) - self.children.append( - EditableRow(device_id, schema_hash, "geometryFile.updateManualOnLoad", 6, 2) - ) - self.children.append( - DisplayCommandModel( - keys=[f"{device_id}.geometryFile.loadFromFile"], - width=6 * BASE_INC, - height=BASE_INC, - ), - ) - - @titled("Stats") @boxed class StatsBox(HorizontalLayout): @@ -1292,33 +606,6 @@ class PreviewDisplayArea(VerticalLayout): ) -# section: generating actual scenes - - -def schema_to_hash(schema): - if isinstance(schema, (karathon.Hash, karabo.native.Hash)): - return schema - elif isinstance(schema, karathon.Schema): - return schema.getParameterHash() - else: - return schema.hash - - -def scene_generator(fun): - # TODO: pretty decorator - def aux(*args, **kwargs): - content = 
fun(*args, **kwargs) - - scene = SceneModel( - children=content.render(PADDING, PADDING), - width=content.width + 2 * PADDING, - height=content.height + 2 * PADDING, - ) - return write_scene(scene) - - return aux - - @scene_generator def correction_device_overview(device_id, schema): schema_hash = schema_to_hash(schema) @@ -1511,9 +798,7 @@ def roitool_overview(device_id, schema): schema_hash = schema_to_hash(schema) return VerticalLayout( HorizontalLayout( - RoiSelection( - f"{device_id}.output.schema.fullImage", [f"{device_id}.roi"] - ), + RoiSelection(f"{device_id}.output.schema.fullImage", [f"{device_id}.roi"]), VerticalLayout( RoiBox(device_id), StatsBox(device_id, schema_hash), @@ -1791,57 +1076,6 @@ def gotthard2_assembler_overview(device_id, schema): ) -@scene_generator -def quadrant_geometry_overview(device_id, schema): - schema_hash = schema_to_hash(schema) - return VerticalLayout( - HorizontalLayout( - ManualQuadrantGeometrySettings(device_id), - GeometryFromFileSettings(device_id, schema_hash), - TweakCurrentGeometry(device_id), - ), - GeometryPreview(device_id), - ) - - -@scene_generator -def origin_geometry_overview(device_id, schema): - # TODO: handle loading - return VerticalLayout( - HorizontalLayout( - ManualOriginGeometrySettings(device_id), - # GeometryFromFileSettings(device_id, schema_hash), - # TweakCurrentGeometry(device_id), - ), - GeometryPreview(device_id), - ) - - -@scene_generator -def relative_geometry_overview(device_id, schema): - return VerticalLayout( - HorizontalLayout( - RelativeGeometrySettings(device_id), - # GeometryFromFileSettings(device_id, schema_hash), - # TweakCurrentGeometry(device_id), - ), - GeometryPreview(device_id), - ) - - -@scene_generator -def modules_geometry_overview(device_id, schema): - schema_hash = schema_to_hash(schema) - return VerticalLayout( - HorizontalLayout( - ManualModulesGeometrySettings(device_id), - GeometryFromFileSettings(device_id, schema_hash), - TweakCurrentGeometry(device_id), - ), - GeometryPreview(device_id), - ) - - @scene_generator def condition_checker_overview(device_id, schema): schema_hash = schema_to_hash(schema) @@ -1883,96 +1117,3 @@ def condition_checker_overview(device_id, schema): height=20 * BASE_INC, ), ) - - -# section: here be monsters - - -def recursive_editable( - device_id, - schema_hash, - prefix, - depth=1, - max_depth=3, - title=None, - row_class=MaybeDisplayMaybeEditableRow, -): - schema_hash = schema_to_hash(schema_hash) - # note: not just using sets because that loses ordering - node_keys = [] - value_keys = [] - slot_keys = [] - attr = schema_hash.getAttributes(prefix) - - if title is None: - if "displayedName" in attr: - title = attr.get("displayedName") - else: - title = prefix.split(".")[-1] - - for key in schema_hash.get(prefix).getKeys(): - attrs = schema_hash.getAttributes(f"{prefix}.{key}") - if attrs.get("nodeType") == karabo.native.NodeType.Node.value: - if "classId" in attrs and attrs.get("classId") == "Slot": - slot_keys.append(key) - else: - node_keys.append(key) - else: - value_keys.append(key) - res = titled(title)(boxed(VerticalLayout))( - children=[ - row_class(device_id, schema_hash, f"{prefix}.{key}") for key in value_keys - ] - + [ - DisplayCommandModel( - keys=[f"{device_id}.{prefix}.{key}"], - width=10 * BASE_INC, - height=BASE_INC, - ) - for key in slot_keys - ], - padding=0, - ) - if depth < max_depth: - res.children.append( - VerticalLayout( - children=[ - recursive_editable( - device_id, - schema_hash, - f"{prefix}.{key}", - depth=depth + 1, - 
max_depth=max_depth, - row_class=row_class, - ) - for key in node_keys - ] - ) - ) - else: - res.children.extend( - [ - VerticalLayout( - DeviceSceneLinkModel( - text=key, - keys=[f"{device_id}.availableScenes"], - target=f"browse_schema:{prefix}.{key}", - target_window=SceneTargetWindow.Dialog, - width=5 * BASE_INC, - height=BASE_INC, - ), - ) - for key in node_keys - ] - ) - return res - - -@scene_generator -def recursive_subschema_scene( - device_id, - device_schema, - prefix="managedKeys", -): - mds_hash = schema_to_hash(device_schema) - return recursive_editable(device_id, mds_hash, prefix) diff --git a/src/calng/shmem_utils.py b/src/calng/shmem_utils.py deleted file mode 100644 index 989171d07f80e208862bd444b48dd8901be0e521..0000000000000000000000000000000000000000 --- a/src/calng/shmem_utils.py +++ /dev/null @@ -1,188 +0,0 @@ -import multiprocessing.shared_memory - -import numpy as np - -from . import utils - - -def parse_shmem_handle(handle_string): - buffer_name, dtype, shape, index = handle_string.split("$") - dtype = getattr(np, dtype) - shape = tuple(int(n) for n in shape.split(",")) - index = int(index) - return buffer_name, dtype, shape, index - - -def open_shmem_from_handle(handle_string): - """Conveniently open readonly SharedMemory with ndarray view from a handle.""" - shm_name, dtype, shape, _ = parse_shmem_handle(handle_string) - shm_mem = multiprocessing.shared_memory.SharedMemory( - name=buffer_name, create=False - ) - array = np.ndarray( - shape=shape, - dtype=dtype, - buffer=shm_mem.buf, - ) - - return buffer_mem, array - - -class ShmemCircularBufferReceiver: - """The receiving end of ShmemCircularBuffer. Will receive shmem handles and open - the corresponding buffers automatically when needed in `get`. For convenience, - includes `dereference_shmem_handles` for hashes.""" - def __init__(self): - self._name_to_mem = {} - self._name_to_ary = {} - - def __del__(self): - for mem in self._name_to_mem.values(): - mem.close() - - def get(self, handle_string): - shm_name, dtype, shape, index = parse_shmem_handle(handle_string) - if shm_name not in self._name_to_mem: - mem = multiprocessing.shared_memory.SharedMemory( - name=shm_name, create=False - ) - self._name_to_mem[shm_name] = mem - ary = np.ndarray( - shape=shape, - dtype=dtype, - buffer=mem.buf, - ) - self._name_to_ary[shm_name] = ary - return ary[index] - - ary = self._name_to_ary[shm_name] - if ary.shape != shape or ary.dtype != dtype: - del ary - mem = self._name_to_mem[shm_name] - ary = np.ndarray( - shape=shape, - dtype=dtype, - buffer=mem.buf, - ) - self._name_to_ary[shm_name] = ary - - return ary[index] - - def dereference_shmem_handles(self, data_hash): - if data_hash.has("calngShmemPaths"): - shmem_paths = list(data_hash["calngShmemPaths"]) - data_hash.erase("calngShmemPaths") - for shmem_path in shmem_paths: - if not data_hash.has(shmem_path): - # TODO: proper warnings - print(f"Warning: hash did not contain {shmem_path}") - continue - dereferenced = self.get(data_hash[shmem_path]) - data_hash[shmem_path] = dereferenced - - -class ShmemCircularBuffer: - """Convenience wrapper around shmem-backed ndarray buffers - - The underlying memory will be opened as an ndarray with shape (buffer_size, ) + - array_shape where buffer_size is memory_budget // dtype * array size. Each call - to next_slot will return the next entry along the first dimension of this array - (both a handle for IPC usage and the ndarray view). 
- """ - - def __init__(self, memory_budget, array_shape, dtype, shmem_name): - # for portable use: name has leading slash and no other slashes - self.shmem_name = "/" + shmem_name.lstrip("/").replace("/", "_") - self._cuda_pinned = False - self._shared_memory = None - try: - self._shared_memory = multiprocessing.shared_memory.SharedMemory( - name=self.shmem_name, - size=memory_budget, - create=True, - ) - except FileExistsError: - # maybe device was restarted uncleanly and there's a lingering shmem "file" - self._shared_memory = multiprocessing.shared_memory.SharedMemory( - name=self.shmem_name, - create=False, - ) - # but may need to recreate if existing one is not suitable - if self._shared_memory.size != memory_budget: - self._shared_memory.close() - self._shared_memory.unlink() - # if it fails again, we're in real trouble, so not catching this - self._shared_memory = multiprocessing.shared_memory.SharedMemory( - name=self.shmem_name, - size=memory_budget, - create=True, - ) - self._buffer_ary = None - self._update_shape(array_shape, dtype) - self._next_slot_index = 0 - - def _update_shape(self, array_shape, dtype): - array_shape = tuple(array_shape) - self._array_bytes = np.dtype(dtype).itemsize * np.product(array_shape) - num_slots = self._shared_memory.size // self._array_bytes - if num_slots == 0: - raise ValueError("Array size exceeds size of allocated memory block") - full_shape = (num_slots,) + array_shape - - if self._buffer_ary is not None: - del self._buffer_ary - self._buffer_ary = np.ndarray( - shape=full_shape, - dtype=dtype, - buffer=self._shared_memory.buf, - ) - shape_str = ",".join(str(n) for n in full_shape) - self.shmem_handle_template = ( - f"{self.shmem_name}${np.dtype(dtype)}${shape_str}${{index}}" - ) - - def change_shape(self, array_shape, dtype=None): - """Set new array shape to buffer. Note that the existing SharedMemory object is - still used. Old data in there will be mangled and number of slots will depend - upon new array shape and original memory budget. 
- """ - old_array_bytes = self._array_bytes - if dtype is None: - dtype = self._buffer_ary.dtype - self._update_shape(array_shape, dtype) - # continue from "next" (least recently touched) slot aligned to new array size - self._next_slot_index = ( - utils.ceil_div(old_array_bytes * self._next_slot_index, self._array_bytes) - ) % self.num_slots - - def cuda_pin(self): - import cupy - - self._memory_pointer = self._buffer_ary.ctypes.get_data() - cupy.cuda.runtime.hostRegister( - self._memory_pointer, self._shared_memory.size, 0 - ) - - def __del__(self): - if self._shared_memory is None: - return - - if self._cuda_pinned: - import cupy - - cupy.cuda.runtime.hostUnregister(self._memory_pointer) - del self._buffer_ary - self._shared_memory.close() - self._shared_memory.unlink() - del self._shared_memory - - @property - def num_slots(self): - return self._buffer_ary.shape[0] - - def next_slot(self): - current_index = self._next_slot_index - self._next_slot_index = (self._next_slot_index + 1) % self.num_slots - shmem_handle = self.shmem_handle_template.format(index=current_index) - data = self._buffer_ary[current_index] - return shmem_handle, data diff --git a/src/calng/stacking_utils.py b/src/calng/stacking_utils.py index b41c4de52f0f4f44d0146f44459c0ad0903e8536..4d6e7008afa8a68ae44deabfef3d651cf2f7f910 100644 --- a/src/calng/stacking_utils.py +++ b/src/calng/stacking_utils.py @@ -3,6 +3,7 @@ import concurrent.futures import enum import re +from calngUtils import misc as utils from karabo.bound import ( BOOL_ELEMENT, INT32_ELEMENT, @@ -14,8 +15,6 @@ from karabo.bound import ( ) import numpy as np -from . import utils - class GroupType(enum.Enum): MULTISOURCE = "sources" # same key stacked from multiple sources in new source diff --git a/src/calng/utils.py b/src/calng/utils.py index 1fa1b1a058e4a2b7f2bcbd504c44efbf26459dcf..fd7c6c284b4409e33c5ea9816eac8cfc17d6744c 100644 --- a/src/calng/utils.py +++ b/src/calng/utils.py @@ -2,13 +2,10 @@ import contextlib import collections import enum import functools -import inspect import itertools -import threading -import time -from timeit import default_timer import numpy as np +from calngUtils import misc class WarningContextSystem: @@ -150,90 +147,6 @@ def pick_frame_index(selection_mode, index, cell_table, pulse_table): return (frame_index, cell_id, pulse_id), warning -def threadsafe_cache(fun): - """This decorator imitates functools.cache, but threadsafer - - With multiple threads hitting a function cached by functools.cache, it is possible - to trigger recomputation. This decorator adds granular locking: each key in the - cache (derived from arguments) has its own lock. - """ - - locks = {} - results = {} - fun_sig = inspect.signature(fun) - - @functools.wraps(fun) - def aux(*args, **kwargs): - bound_args = fun_sig.bind(*args, **kwargs) - bound_args.apply_defaults() - key = bound_args.args + tuple(bound_args.kwargs.items()) - if key in results: - return results[key] - with locks.setdefault(key, threading.Lock()): - if key in results: - # someone else did this - may still be processing - return results[key] - else: - res = fun(*args, **kwargs) - results[key] = res - return res - - return aux - - -@functools.lru_cache() -def transpose_order(axes_in, axes_out): - """Computes the order of axes_out relative to axes_in for transposition purposes - - Both axes_in and axes_out are assumed to be strings in which each letter represents - an axis (duck typing accepts: any iterable of hashable elements). 
They should - probably be of the same length and have no repetitions, but this is not enforced. - Off-label use voids warranty. - """ - axis_order = {axis: index for index, axis in enumerate(axes_in)} - return tuple(axis_order[axis] for axis in axes_out) - - -def stacking_buffer_shape(array_shape, stack_num, axis=0): - """Figures out the shape you would need for np.stack. Think of the axis in terms of - array after adding additional axis, i.e. the number of axes is len(aray_shape)+1.""" - if axis > len(array_shape) or axis < -len(array_shape) - 1: - # complain when np.stack would - raise np.AxisError( - f"axis {axis} is out of bounds " - f"for array of dimension {len(array_shape) + 1}" - ) - if axis < 0: - axis += len(array_shape) + 1 - return array_shape[:axis] + (stack_num,) + array_shape[axis:] - - -def interleaving_buffer_shape(array_shape, stack_num, axis): - """Figures out the shape you would need to interleave stack_num arrays on axis. The - shape is the same as one would get from np.concatenate.""" - if axis > len(array_shape) - 1 or axis < -len(array_shape): - raise np.AxisError( - f"axis {axis} is out of bounds for array shape {array_shape}" - ) - if axis < 0: - axis += len(array_shape) + 1 - return ( - array_shape[:axis] + (array_shape[axis] * stack_num,) + array_shape[axis + 1 :] - ) - - -def set_on_axis(array, vals, index, axis): - """set_on_axis(A, x, 1, 2) corresponds to A[:, :, 1] = x""" - if axis >= array.ndim: - raise IndexError( - f"too many indices for array: array is {len(array.shape)}-dimensional, " - f"but {axis+1} were indexed" - ) - # TODO: maybe support negative axis with wraparound - indices = np.index_exp[:] * axis + np.index_exp[index] - array[indices] = vals - - _np_typechar_to_c_typestring = { "?": "bool", "B": "unsigned char", @@ -269,139 +182,6 @@ def enum_to_c_template(enum_class): return "\n".join(res) -def ceil_div(num, denom): - return (num + denom - 1) // denom - - -class RepeatingTimer: - """A timer which will call callback every interval seconds""" - - def __init__( - self, - interval, - callback, - start_now=True, - daemon=True, - ): - self.stopped = True - self.interval = interval - self.callback = callback - self.daemonize = daemon - if start_now: - self.start() - - def start(self): - self.stopped = False - self.wakeup_time = default_timer() + self.interval - - def runner(): - while not self.stopped: - now = default_timer() - while now < self.wakeup_time: - diff = self.wakeup_time - now - time.sleep(diff) - if self.stopped: - return - now = default_timer() - self.callback() - self.wakeup_time = default_timer() + self.interval - - self.thread = threading.Thread(target=runner, daemon=self.daemonize) - self.thread.start() - - def stop(self): - self.stopped = True - - -class ExponentialMovingAverage: - def __init__(self, alpha, use_first_value=True): - self.alpha = alpha - self.initialised = not use_first_value - self.mean = 0 - - def update(self, value): - if self.initialised: - self.mean += self.alpha * (value - self.mean) - else: - self.mean = value - self.initialised = True - - def get(self): - return self.mean - - -class WindowRateTracker: - def __init__(self, buffer_size=20, time_window=10): - self.time_window = time_window - self.buffer_size = buffer_size - self.deque = collections.deque(maxlen=self.buffer_size) - - def update(self): - self.deque.append(default_timer()) - - def get(self): - now = default_timer() - cutoff = now - self.time_window - try: - while self.deque[0] < cutoff: - self.deque.popleft() - except IndexError: - return 0 - if 
len(self.deque) < 2: - return 0 - if len(self.deque) < self.buffer_size: - # TODO: estimator avoiding ramp-up of when starting anew - return len(self.deque) / self.time_window - else: - # if going faster than buffer size per time window, look at timestamps - oldest, newest = self.deque[0], self.deque[-1] - buffer_span = newest - oldest - period = buffer_span / (self.buffer_size - 1) - if (now - newest) < period: - # no new estimate yet, expecting new event after period - return 1 / period - else: - return self.buffer_size / (now - oldest) - - -class Stopwatch: - """Context manager measuring time spent in context. - - Keyword arguments: - name: if not None, will appear in string representation - also, if not None, will automatically print self when done - """ - - def __init__(self, name=None): - self.stop_time = None - self.name = name - - def __enter__(self): - self.start_time = default_timer() - return self - - def __exit__(self, t, v, tb): # type, value and traceback irrelevant - self.stop_time = default_timer() - if self.name is not None: - print(repr(self)) - - @property - def elapsed(self): - if self.stop_time is not None: - return self.stop_time - self.start_time - else: - return default_timer() - self.start_time - - def __str__(self): - return self.__repr__() - - def __repr__(self): - if self.name is None: - return f"{self.elapsed:.3f} s" - else: - return f"{self.name}: {self.elapsed:.3f} s" - - class StateContext: """What if device state was a stack?""" @@ -421,121 +201,34 @@ class StateContext: self.device.updateState(self.revert_to) -class NonMonotonicTrainIdWarning(Warning): - pass - - -class TrainRatioTracker: - """Measure how many percent of recent train IDs (from contiguous set) were seen - - The tracker will maintain a queue of buffer_size train IDs going back at most - buffer_size from latest train ID (depending on calls to get). Call update(train_id) - when you see a new train and call get to get() the ratio of recent trains seen. - - Updating will raise NonMonotonicTrainIdWarning or LargeTrainIdGapWarning in case - train ID looks like it's from far in the future or from some time in the past. - Device using this tracker should decide what to do; maybe call reset. - """ - - def __init__(self, buffer_size=50): - self._train_id_queue = collections.deque(maxlen=buffer_size) - - def get(self, current_train=None, expected_delay=None): - """Get the ratio of recent trains based on buffer contents - - If current_train is provided, the train ID span used to compute the ratio is - from oldest tid in buffer to current_train. Otherwise, latest update tid is - used as stand-in for current_train. - - If expected_delay is provided in addition to current_train, will take max of - latest update tid and current train minus expected delay. - """ - try: - if current_train is None: - current_train = self._train_id_queue[-1] - elif expected_delay is not None: - current_train = max( - current_train - expected_delay, self._train_id_queue[-1] - ) - oldest_train = self._train_id_queue[0] - except IndexError: - return 0 - - return len(self._train_id_queue) * 100 / (current_train - oldest_train + 1) - - def reset(self): - self._train_id_queue.clear() - - def update(self, train_id): - # allows same train ID multiple times - if self._train_id_queue and (last_seen := self._train_id_queue[-1]) > train_id: - raise NonMonotonicTrainIdWarning( - "New train ID not greater than last train ID seen! 
" - f"New: {train_id}, previous: {last_seen}" - ) - self._train_id_queue.append(train_id) - - -class ChainHash: - """Like read-only ChainMap, but for karabo.bound.Hash(es) instead!""" - - def __init__(self, *hashes): - self._hashes = hashes - - def __getitem__(self, key): - for h in self._hashes: - if h.has(key): - return h[key] - raise KeyError() - - def get(self, key): - return self[key] - - -class SkippingThrottler: - def __init__(self, min_period): - self.min_period = min_period - self.latest_ts = float("-inf") - self.lock = threading.Lock() - - def test_and_set(self): - with self.lock: - now = default_timer() - if (now - self.latest_ts) >= self.min_period: - self.latest_ts = now - return True - else: - return False - - class BadPixelValues(enum.IntFlag): """The European XFEL Bad Pixel Encoding Straight from pycalibration's enum.py""" - OFFSET_OUT_OF_THRESHOLD = 2 ** 0 - NOISE_OUT_OF_THRESHOLD = 2 ** 1 - OFFSET_NOISE_EVAL_ERROR = 2 ** 2 - NO_DARK_DATA = 2 ** 3 - CI_GAIN_OUT_OF_THRESHOLD = 2 ** 4 - CI_LINEAR_DEVIATION = 2 ** 5 - CI_EVAL_ERROR = 2 ** 6 - FF_GAIN_EVAL_ERROR = 2 ** 7 - FF_GAIN_DEVIATION = 2 ** 8 - FF_NO_ENTRIES = 2 ** 9 - CI2_EVAL_ERROR = 2 ** 10 - VALUE_IS_NAN = 2 ** 11 - VALUE_OUT_OF_RANGE = 2 ** 12 - GAIN_THRESHOLDING_ERROR = 2 ** 13 - DATA_STD_IS_ZERO = 2 ** 14 - ASIC_STD_BELOW_NOISE = 2 ** 15 - INTERPOLATED = 2 ** 16 - NOISY_ADC = 2 ** 17 - OVERSCAN = 2 ** 18 - NON_SENSITIVE = 2 ** 19 - NON_LIN_RESPONSE_REGION = 2 ** 20 - WRONG_GAIN_VALUE = 2 ** 21 - NON_STANDARD_SIZE = 2 ** 22 + OFFSET_OUT_OF_THRESHOLD = 2**0 + NOISE_OUT_OF_THRESHOLD = 2**1 + OFFSET_NOISE_EVAL_ERROR = 2**2 + NO_DARK_DATA = 2**3 + CI_GAIN_OUT_OF_THRESHOLD = 2**4 + CI_LINEAR_DEVIATION = 2**5 + CI_EVAL_ERROR = 2**6 + FF_GAIN_EVAL_ERROR = 2**7 + FF_GAIN_DEVIATION = 2**8 + FF_NO_ENTRIES = 2**9 + CI2_EVAL_ERROR = 2**10 + VALUE_IS_NAN = 2**11 + VALUE_OUT_OF_RANGE = 2**12 + GAIN_THRESHOLDING_ERROR = 2**13 + DATA_STD_IS_ZERO = 2**14 + ASIC_STD_BELOW_NOISE = 2**15 + INTERPOLATED = 2**16 + NOISY_ADC = 2**17 + OVERSCAN = 2**18 + NON_SENSITIVE = 2**19 + NON_LIN_RESPONSE_REGION = 2**20 + WRONG_GAIN_VALUE = 2**21 + NON_STANDARD_SIZE = 2**22 def downsample_2d(arr, factor, reduction_fun=np.nanmax): @@ -589,30 +282,10 @@ def cell_table_to_string(cell_table): def grid_to_cover_shape_with_blocks(full_shape, block_shape): - return tuple(itertools.starmap(ceil_div, zip(full_shape, block_shape))) + return tuple(itertools.starmap(misc.ceil_div, zip(full_shape, block_shape))) def apply_partial_lut(data, lut, mask, out, missing=np.nan): tmp = out.ravel() tmp[~mask] = data.ravel()[lut] tmp[mask] = missing - - -def add_unsafe_get(device_class): - # forward-compatible unsafe_get proposed by @haufs - if not hasattr(device_class, "unsafe_get"): - - def unsafe_get(self, key): - """Look up key in device schema quickly, but without consistency locks - - This is only relevant for use in hot path (input handler). Circumvents the - locking done by PythonDevice.get. Note that PythonDevice.get does handle - some special types (by looking at full schema for type information). In - particular, device state enum: `self.get("state")` will return a State - whereas `self.unsafe_get("state")` will return a string. Handle with care! 
- """ - - # at least until Karabo 2.14, self._parameters is maintained by PythonDevice - return self._parameters.get(key) - - setattr(device_class, "unsafe_get", unsafe_get) diff --git a/src/tests/problem.py b/src/tests/problem.py deleted file mode 100644 index a9590844bdc35ec0f69a56eff5856af995fab51d..0000000000000000000000000000000000000000 --- a/src/tests/problem.py +++ /dev/null @@ -1,22 +0,0 @@ -from calng import utils - - -calls = 0 - - -@utils.threadsafe_cache -def will_raise_once(argument): - global calls - calls += 1 - if calls == 1: - raise Exception("That's just what I do") - return argument + 1 - - -try: - will_raise_once(0) -except Exception as ex: - print("As expected, firs call raised:", ex) - -print("Now calling again:") -print(will_raise_once(0)) diff --git a/src/tests/test_shmem_utils.py b/src/tests/test_shmem_utils.py deleted file mode 100644 index eab92629c1808316fddc6a41e4179208cad4a4f6..0000000000000000000000000000000000000000 --- a/src/tests/test_shmem_utils.py +++ /dev/null @@ -1,137 +0,0 @@ -import multiprocessing -import pathlib -import time - -import numpy as np -import pytest -from karabo.bound import Hash - -from calng import shmem_utils - - -def test_change_shape(): - shm_fn = "test_shmem_buffer" - my_buffer = shmem_utils.ShmemCircularBuffer( - 1024 * 4, - (2, 3), - np.uint32, - shm_fn, - ) - handles = [] - for i in range(3): - handle, ary = my_buffer.next_slot() - ary.fill(i) - handles.append(handle) - my_buffer.change_shape((5, 7)) - for i in range(3, 5): - handle, ary = my_buffer.next_slot() - ary.fill(i) - handles.append(handle) - receiver = shmem_utils.ShmemCircularBufferReceiver() - # old handles don't get immediately mangled after change_shape - for i, handle in enumerate(handles): - ary = receiver.get(handle) - assert np.all(ary == i) - - del my_buffer - assert not (pathlib.Path("/dev/shm") / shm_fn).exists() - - -def test_multiprocessing(): - # note: test doesn't use hashes because they can't easily be pickled :( - # note: will ignore resource_tracker warning for now; might be CPython3.8 bug - # (we check that "file" is at least gone) - shm_fn = "test_multiproc_shmem_buffer" - num_messages = 10 - - def sender(handle_q, sent_q, barrier): - # handle_q: queue replacing channel communication - # sent_q: queue with original data for comparison - shm_buffer = shmem_utils.ShmemCircularBuffer( - memory_budget=1_000_000, - array_shape=(1, 10, 20), - dtype=np.float64, - shmem_name=shm_fn, - ) - - for i in range(num_messages): - # switches "number of frames" for every train - some_array = np.random.random(size=(i + 1, 10, 20)) - # could consider adding a convenience "put" function to buffer - shm_buffer.change_shape( - array_shape=some_array.shape, dtype=some_array.dtype - ) - handle, shm_array = shm_buffer.next_slot() - shm_array[:] = some_array - handle_q.put(handle) - sent_q.put(some_array) - handle_q.close() - sent_q.close() - # wait before dying so shmem buffer is not removed - barrier.wait(timeout=5) - - def receiver(handle_q, received_q, barrier): - shm_recv = shmem_utils.ShmemCircularBufferReceiver() - for i in range(num_messages): - handle = handle_q.get(timeout=5) - data = shm_recv.get(handle) - received_q.put(data) - received_q.close() - barrier.wait(timeout=5) - - barrier = multiprocessing.Barrier(3) - sent_q = multiprocessing.Queue() - received_q = multiprocessing.Queue() - handle_q = multiprocessing.Queue() - send_proc = multiprocessing.Process(target=sender, args=(handle_q, sent_q, barrier)) - recv_proc = multiprocessing.Process( - 
target=receiver, args=(handle_q, received_q, barrier) - ) - send_proc.start() - recv_proc.start() - for i in range(num_messages): - expected = sent_q.get(timeout=5) - got = received_q.get(timeout=5) - assert np.array_equal(expected, got) - assert (pathlib.Path("/dev/shm") / shm_fn).exists() - barrier.wait(timeout=5) - send_proc.join() - recv_proc.join() - - assert not (pathlib.Path("/dev/shm") / shm_fn).exists() - - -def test_lingering_file(): - # will kill a process ungracefully to mess up cleanup - shm_fn = "test_lingering_shmem_buffer" - - def tragic_function(barrier): - shm_buffer = shmem_utils.ShmemCircularBuffer( - memory_budget=1_000_000, - array_shape=(1, 10, 20), - dtype=np.float64, - shmem_name=shm_fn, - ) - barrier.wait(timeout=5) - # will not survive this sleep - time.sleep(10) - - barrier = multiprocessing.Barrier(2) - poor_proc = multiprocessing.Process(target=tragic_function, args=(barrier,)) - poor_proc.start() - barrier.wait(timeout=5) - assert (pathlib.Path("/dev/shm") / shm_fn).exists() - poor_proc.kill() - assert (pathlib.Path("/dev/shm") / shm_fn).exists() - # so now there's a lingering file; that's problem - # but we can just make a new one - shm_buffer = shmem_utils.ShmemCircularBuffer( - memory_budget=2_000_000, - array_shape=(1, 10, 20), - dtype=np.float64, - shmem_name=shm_fn, - ) - assert shm_buffer._shared_memory.size == 2_000_000 - - -# note: would be nice to prevent multiple writers on same memory... diff --git a/src/tests/test_utils.py b/src/tests/test_utils.py deleted file mode 100644 index d53a31eadc4818994516267f5b83f9d55be7f522..0000000000000000000000000000000000000000 --- a/src/tests/test_utils.py +++ /dev/null @@ -1,150 +0,0 @@ -import random -import threading -import time -import timeit - -import numpy as np -import pytest -from calng import utils - - -def test_stacking_buffer_shape(): - original_shape = (1, 2, 3) - assert utils.stacking_buffer_shape(original_shape, 4, 0) == (4, 1, 2, 3) - assert utils.stacking_buffer_shape(original_shape, 4, 2) == (1, 2, 4, 3) - assert utils.stacking_buffer_shape(original_shape, 4, 3) == (1, 2, 3, 4) - assert utils.stacking_buffer_shape(original_shape, 4, -1) == (1, 2, 3, 4) - assert utils.stacking_buffer_shape(original_shape, 4, -4) == (4, 1, 2, 3) - with pytest.raises(np.AxisError): - utils.stacking_buffer_shape(original_shape, 4, 4) - with pytest.raises(np.AxisError): - utils.stacking_buffer_shape(original_shape, 4, -5) - - -def test_set_on_axis(): - A = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3]]) - - manual = A.copy() - manual[0] = 0 - fun = A.copy() - utils.set_on_axis(fun, 0, 0, 0) - assert np.array_equal(manual, fun) - - manual = A.copy() - manual[1] = np.arange(5) - fun = A.copy() - utils.set_on_axis(fun, np.arange(5), 1, 0) - assert np.array_equal(manual, fun) - - manual = A.copy() - manual[:, 1] = 0 - fun = A.copy() - utils.set_on_axis(fun, 0, 1, 1) - assert np.array_equal(manual, fun) - - with pytest.raises(IndexError): - utils.set_on_axis(fun, ..., ..., 3) - - # case triggering obvious bug I had made - A = np.array([[[1, 2, 3], [4, 5, 6]]]) - manual = A.copy() - manual[:, 1] = 0 - fun = A.copy() - utils.set_on_axis(fun, 0, 1, 1) - assert np.array_equal(manual, fun) - - -def test_get_c_type(): - assert utils.np_dtype_to_c_type(np.float16) == "half" - assert utils.np_dtype_to_c_type(np.float32) == "float" - assert utils.np_dtype_to_c_type(np.float64) == "double" - - assert utils.np_dtype_to_c_type(np.uint8) == "unsigned char" - assert utils.np_dtype_to_c_type(np.uint16) == "unsigned short" 
- assert utils.np_dtype_to_c_type(np.uint32) in ("unsigned", "unsigned int") - assert utils.np_dtype_to_c_type(np.uint64) == "unsigned long" - - assert utils.np_dtype_to_c_type(np.int8) == "char" - assert utils.np_dtype_to_c_type(np.int16) == "short" - assert utils.np_dtype_to_c_type(np.int32) == "int" - assert utils.np_dtype_to_c_type(np.int64) == "long" - - -class TestThreadsafeCache: - def test_arg_key_wrap(self): - calls = [] - - @utils.threadsafe_cache - def fun(a, b, c=1, d=2, *args, **kwargs): - calls.append((a, b, c, d, args, kwargs)) - - # reordering kwargs /does/ matter because dicts are ordered now - # (note: functools.lru_cache doesn't sort, claims because of speed) - fun(1, 2, 3, 4, 5, six=6, seven=7) - fun(1, 2, 3, 4, 5, seven=7, six=6) - assert len(calls) == 2, "kwargs order matters" - calls.clear() - - # reordering kw-style positional args does not matter - fun(1, 2, 1, 2) - fun(a=1, c=1, b=2, d=2) - assert len(calls) == 1, "reordering regular args as kws doesn't matter" - # and omitting default values does not matter - fun(b=2, a=1) - fun(1, 2) - assert len(calls) == 1, "omitting default args doesn't matter" - - def test_threadsafeness(self): - # wow, synchronization (presumably) makes this take forever *without* decorator - from_was_called = [] - - base_sleep = 1 - random_sleep = 0.1 - - @utils.threadsafe_cache - def was_called(x): - time.sleep(random.random() * random_sleep + base_sleep) - from_was_called.append(x) - - threads = [] - num_threads = 1000 - letters = "abcd" - start_ts = timeit.default_timer() - for i in range(num_threads): - for letter in letters: - thread = threading.Thread(target=was_called, args=(letter,)) - thread.start() - threads.append(thread) - submitted_ts = timeit.default_timer() - print(f"Right after: {len(from_was_called)}") - for thread in threads: - thread.join() - stop_ts = timeit.default_timer() - total_time = stop_ts - start_ts - print(f"After join: {len(from_was_called)}") - print(f"Time to submit: {submitted_ts - start_ts}") - print(f"Wait for join: {stop_ts - submitted_ts}") - print(f"Total: {total_time}") - - # check that function was only called with each letter once - # this is where the decorator from functools will fail - assert len(from_was_called) == len( - letters - ), "Caching prevents recomputation due to threading" - - # check that the function was not locked too broadly - # (should run faster than sequential lower bound) - reasonable_time_to_spawn_thread = 0.45 / 1000 - cutoff = ( - len(letters) * base_sleep + reasonable_time_to_spawn_thread * num_threads - ) - print(f"Cutoff (sequential lower bound): {cutoff}") - assert ( - total_time < cutoff - ), "Locking should not be so broad as to make sequential" - print( - f"Each thread would have slept [{base_sleep}, {base_sleep + random_sleep})" - ) - - # check that time doesn't go backwards suddenly - assert total_time >= base_sleep, "These tests should measure time correctly" diff --git a/src/tests/test_agipd_kernels.py b/tests/test_agipd_kernels.py similarity index 100% rename from src/tests/test_agipd_kernels.py rename to tests/test_agipd_kernels.py diff --git a/src/tests/test_calcat_utils.py b/tests/test_calcat_utils.py similarity index 97% rename from src/tests/test_calcat_utils.py rename to tests/test_calcat_utils.py index c78fb107fde13f425acd0f044c94b3ac9f8807ac..e81bcfc7b2bdd3475d246080b72433c70671d802 100644 --- a/src/tests/test_calcat_utils.py +++ b/tests/test_calcat_utils.py @@ -1,7 +1,7 @@ import pathlib from calng.corrections import AgipdCorrection, DsscCorrection 
-from calng.utils import Stopwatch +from calngUtils.timing import Stopwatch from karabo.bound import Hash, Schema import pytest @@ -56,7 +56,7 @@ class DummyDsscDevice(DummyBaseDevice): @staticmethod def expectedParameters(expected): - DsscCorrection.DsscCalcatFriend.add_schema(expected, DummyDsscDevice) + DsscCorrection.DsscCalcatFriend.add_schema(expected) def __init__(self, config): # TODO: check config against schema (as Karabo would) diff --git a/src/tests/test_detectorassembler.py b/tests/test_detectorassembler.py similarity index 100% rename from src/tests/test_detectorassembler.py rename to tests/test_detectorassembler.py diff --git a/src/tests/test_dssc_kernels.py b/tests/test_dssc_kernels.py similarity index 100% rename from src/tests/test_dssc_kernels.py rename to tests/test_dssc_kernels.py diff --git a/src/tests/test_pnccd_kernels.py b/tests/test_pnccd_kernels.py similarity index 100% rename from src/tests/test_pnccd_kernels.py rename to tests/test_pnccd_kernels.py diff --git a/src/tests/test_stacking.py b/tests/test_stacking.py similarity index 100% rename from src/tests/test_stacking.py rename to tests/test_stacking.py diff --git a/src/tests/test_strixel.py b/tests/test_strixel.py similarity index 100% rename from src/tests/test_strixel.py rename to tests/test_strixel.py diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8864e87cfddd43886d5b0fa21fa0d2e8b06a4f90 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,18 @@ +import numpy as np +from calng import utils + + +def test_get_c_type(): + assert utils.np_dtype_to_c_type(np.float16) == "half" + assert utils.np_dtype_to_c_type(np.float32) == "float" + assert utils.np_dtype_to_c_type(np.float64) == "double" + + assert utils.np_dtype_to_c_type(np.uint8) == "unsigned char" + assert utils.np_dtype_to_c_type(np.uint16) == "unsigned short" + assert utils.np_dtype_to_c_type(np.uint32) in ("unsigned", "unsigned int") + assert utils.np_dtype_to_c_type(np.uint64) == "unsigned long" + + assert utils.np_dtype_to_c_type(np.int8) == "char" + assert utils.np_dtype_to_c_type(np.int16) == "short" + assert utils.np_dtype_to_c_type(np.int32) == "int" + assert utils.np_dtype_to_c_type(np.int64) == "long"
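
For context on the trimmed-down tests/test_utils.py above: it exercises calng.utils.np_dtype_to_c_type, which resolves a numpy dtype to the C type name used when rendering kernel templates (the lookup table itself stays behind in calng/utils.py as _np_typechar_to_c_typestring). The following is a minimal illustrative sketch, assuming the lookup goes through the single-character type code exposed by np.dtype(...).char; it is not the exact implementation, and the mapping shown here is an assumption consistent with the assertions in the test.

import numpy as np

# Assumed mapping from numpy type characters to C type names; the real table in
# calng.utils also covers bool ("?") and may spell uint32 as "unsigned" rather
# than "unsigned int" (the test accepts either).
_np_typechar_to_c_typestring = {
    "e": "half",
    "f": "float",
    "d": "double",
    "B": "unsigned char",
    "H": "unsigned short",
    "I": "unsigned int",
    "L": "unsigned long",
    "b": "char",
    "h": "short",
    "i": "int",
    "l": "long",
}


def np_dtype_to_c_type(dtype):
    # np.dtype(np.float32).char == "f"; np.dtype(np.int64).char == "l" on
    # 64-bit Linux (type characters for the widest integers are platform
    # dependent, which is why such a table is kept explicit).
    return _np_typechar_to_c_typestring[np.dtype(dtype).char]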