diff --git a/src/Correction/dssc_correction.py b/src/Correction/dssc_correction.py
index 27565f7e094cedcd8405ce656a9c7bdd6b84979f..df2f16ad865cd7cd0fdc4923ea97a1eed4c63081 100644
--- a/src/Correction/dssc_correction.py
+++ b/src/Correction/dssc_correction.py
@@ -64,251 +64,307 @@ class DsscCorrection(calibrationBase.CalibrationReceiverBaseDevice):
             "Offset", "Dark", expected, optional=True, mandatoryForIteration=True
         )
 
-        SLOT_ELEMENT(expected).key("askConnectedReadersToSendMySources").displayedName(
-            "Request sources from connected RunToPipe"
-        ).description(
-            "Only relevant for development environment. When running without a "
-            "CAL_MANAGER, we need to tell RunToPipe instances which sources "
-            "to send us."
-        ).commit()
-
-        BOOL_ELEMENT(expected).key("doAnything").displayedName(
-            "Enable input processing"
-        ).description(
-            "Toggle handling of input (at all). If False, the input handler "
-            "of this device will be skipped. Useful to decrease logspam if "
-            "device is misconfigured."
-        ).assignmentOptional().defaultValue(
-            True
-        ).reconfigurable().commit()
-
-        BOOL_ELEMENT(expected).key("applyCorrection").displayedName(
-            "Enable correction(s)"
-        ).description(
-            "Toggle whether or not correction(s) are applied to image data. "
-            "If false, this device still reshapes data to output shape, "
-            "applies the pulse filter, and casts to output dtype. Useful for "
-            "inspecting the raw data in the same format as corrected data."
-        ).assignmentOptional().defaultValue(
-            True
-        ).reconfigurable().commit()
-
-        INPUT_CHANNEL(expected).key("dataInput").commit()
-        # note: output schema not set, will be updated to match data later
-        OUTPUT_CHANNEL(expected).key("dataOutput").commit()
-
-        VECTOR_STRING_ELEMENT(expected).key("fastSources").displayedName(
-            "Fast data sources"
-        ).description(
-            "Sources to fast data as provided in channel metadata. "
-            "Provide in the form source@path.in.hash to identify both source "
-            "and path in the source data hash.\n"
-            "Currently ignores the path.in.hash part (for hardcoded image.data)"
-        ).assignmentMandatory().commit()
-
-        STRING_ELEMENT(expected).key("pulseFilter").displayedName(
-            "Pulse filter"
-        ).description(
-            "Filter pulses: will be evaluated as array of indices to keep from data. "
-            "Can be anything which can be turned into numpy uint16 array. "
-            "Numpy is available as np. "
-            "Take care not to include duplicates. "
-            "If empty, will not filter at all."
-        ).assignmentOptional().defaultValue(
-            ""
-        ).reconfigurable().commit()
-
-        NODE_ELEMENT(expected).key("dataFormat").displayedName(
-            "Data format (in/out)"
-        ).commit()
-        STRING_ELEMENT(expected).key("dataFormat.inputImageDtype").displayedName(
-            "Input image data dtype"
-        ).description("The (numpy) dtype to expect for incoming image data.").options(
-            "uint16,float32"
-        ).assignmentOptional().defaultValue(
-            "uint16"
-        ).commit()
-        STRING_ELEMENT(expected).key("dataFormat.outputImageDtype").displayedName(
-            "Output image data dtype"
-        ).description(
-            "The (numpy) dtype to use for outgoing image data. "
-            "Input is cast to float32, corrections are applied, and only then "
-            "will the result be cast back to outputImageDtype (all on GPU)."
-        ).options(
-            "float16,float32,uint16"
-        ).assignmentOptional().defaultValue(
-            "float32"
-        ).commit()
-        # important: shape of data as going into correction
-        UINT32_ELEMENT(expected).key("dataFormat.pixelsX").displayedName(
-            "Pixels x"
-        ).description(
-            "Number of pixels of image data along X axis"
-        ).assignmentMandatory().commit()
-        UINT32_ELEMENT(expected).key("dataFormat.pixelsY").displayedName(
-            "Pixels y"
-        ).description(
-            "Number of pixels of image data along Y axis"
-        ).assignmentMandatory().commit()
-        UINT32_ELEMENT(expected).key("dataFormat.memoryCells").displayedName(
-            "Memory cells"
-        ).description(
-            "Full number of memory cells in incoming data"
-        ).assignmentMandatory().commit()
-        UINT32_ELEMENT(expected).key("dataFormat.memoryCellsCorrection").displayedName(
-            "(Debug) Memory cells in correction map"
-        ).description(
-            "Full number of memory cells in currently loaded correction map. "
-            "May exceed memory cell number in input if veto is on. "
-            "This value just displayed for debugging."
-        ).readOnly().initialValue(
-            0
-        ).commit()
-        VECTOR_UINT32_ELEMENT(expected).key("dataFormat.inputDataShape").displayedName(
-            "Input data shape"
-        ).description(
-            "Image data shape in incoming data (from reader / DAQ). "
-            "Value computed from pixelsX, pixelsY, and memoryCells - "
-            "this slot is just showing you what is currently expected."
-        ).readOnly().initialValue(
-            []
-        ).commit()
-        VECTOR_UINT32_ELEMENT(expected).key("dataFormat.outputDataShape").displayedName(
-            "Output data shape"
-        ).description(
-            "Image data shape for data output from this device. "
-            "Value computed from pixelsX, pixelsY, and the size of the pulse filter - "
-            "this slot is just showing what is currently expected."
-        ).readOnly().initialValue(
-            []
-        ).commit()
-
-        UINT32_ELEMENT(expected).key("outputShmemBufferLength").displayedName(
-            "Output buffer length"
-        ).description(
-            "Corrected trains are written to shared memory locations. These are "
-            "pre-allocated and re-used. This parameter determines how big "
-            "(number of trains) the circular buffer will be."
-        ).assignmentOptional().defaultValue(
-            50
-        ).commit()
-
-        # preview schema (WIP)
-        NODE_ELEMENT(expected).key("preview").displayedName("Preview").commit()
+        (
+            SLOT_ELEMENT(expected)
+            .key("askConnectedReadersToSendMySources")
+            .displayedName("Request sources from connected RunToPipe")
+            .description(
+                "Only relevant for development environment. When running "
+                "without a CAL_MANAGER, we need to tell RunToPipe instances "
+                "which sources to send us."
+            )
+            .commit(),
+            BOOL_ELEMENT(expected)
+            .key("doAnything")
+            .displayedName("Enable input processing")
+            .description(
+                "Toggle handling of input (at all). If False, the input "
+                "handler of this device will be skipped. Useful to decrease "
+                "logspam if device is misconfigured."
+            )
+            .assignmentOptional()
+            .defaultValue(True)
+            .reconfigurable()
+            .commit(),
+            BOOL_ELEMENT(expected)
+            .key("applyCorrection")
+            .displayedName("Enable correction(s)")
+            .description(
+                "Toggle whether or not correction(s) are applied to image data. "
+                "If false, this device still reshapes data to output shape, "
+                "applies the pulse filter, and casts to output dtype. Useful for "
+                "inspecting the raw data in the same format as corrected data."
+            )
+            .assignmentOptional()
+            .defaultValue(True)
+            .reconfigurable()
+            .commit(),
+            INPUT_CHANNEL(expected).key("dataInput").commit(),
+            # note: output schema not set, will be updated to match data later
+            OUTPUT_CHANNEL(expected).key("dataOutput").commit(),
+            VECTOR_STRING_ELEMENT(expected)
+            .key("fastSources")
+            .displayedName("Fast data sources")
+            .description(
+                "Sources to fast data as provided in channel metadata. "
+                "Provide in the form source@path.in.hash to identify both source "
+                "and path in the source data hash.\n"
+                "Currently ignores the path.in.hash part (for hardcoded image.data)"
+            )
+            .assignmentMandatory()
+            .commit(),
+            STRING_ELEMENT(expected)
+            .key("pulseFilter")
+            .displayedName("Pulse filter")
+            .description(
+                "Filter pulses: will be evaluated as array of indices to keep from data. "
+                "Can be anything which can be turned into numpy uint16 array. "
+                "Numpy is available as np. "
+                "Take care not to include duplicates. "
+                "If empty, will not filter at all."
+            )
+            .assignmentOptional()
+            .defaultValue("")
+            .reconfigurable()
+            .commit(),
+            UINT32_ELEMENT(expected)
+            .key("outputShmemBufferLength")
+            .displayedName("Output buffer length")
+            .description(
+                "Corrected trains are written to shared memory locations. "
+                "These are pre-allocated and re-used. This parameter "
+                "determines how big (number of trains) the circular buffer "
+                "will be."
+            )
+            .assignmentOptional()
+            .defaultValue(50)
+            .commit(),
+        )
+
+        (
+            NODE_ELEMENT(expected)
+            .key("dataFormat")
+            .displayedName("Data format (in/out)")
+            .commit(),
+            STRING_ELEMENT(expected)
+            .key("dataFormat.inputImageDtype")
+            .displayedName("Input image data dtype")
+            .description("The (numpy) dtype to expect for incoming image data.")
+            .options("uint16,float32")
+            .assignmentOptional()
+            .defaultValue("uint16")
+            .commit(),
+            STRING_ELEMENT(expected)
+            .key("dataFormat.outputImageDtype")
+            .displayedName("Output image data dtype")
+            .description(
+                "The (numpy) dtype to use for outgoing image data. Input is "
+                "cast to float32, corrections are applied, and only then will "
+                "the result be cast back to outputImageDtype (all on GPU)."
+            )
+            .options("float16,float32,uint16")
+            .assignmentOptional()
+            .defaultValue("float32")
+            .commit(),
+            # important: shape of data as going into correction
+            UINT32_ELEMENT(expected)
+            .key("dataFormat.pixelsX")
+            .displayedName("Pixels x")
+            .description("Number of pixels of image data along X axis")
+            .assignmentMandatory()
+            .commit(),
+            UINT32_ELEMENT(expected)
+            .key("dataFormat.pixelsY")
+            .displayedName("Pixels y")
+            .description("Number of pixels of image data along Y axis")
+            .assignmentMandatory()
+            .commit(),
+            UINT32_ELEMENT(expected)
+            .key("dataFormat.memoryCells")
+            .displayedName("Memory cells")
+            .description("Full number of memory cells in incoming data")
+            .assignmentMandatory()
+            .commit(),
+            UINT32_ELEMENT(expected)
+            .key("dataFormat.memoryCellsCorrection")
+            .displayedName("(Debug) Memory cells in correction map")
+            .description(
+                "Full number of memory cells in currently loaded correction map. "
+                "May exceed memory cell number in input if veto is on. "
+                "This value just displayed for debugging."
+            )
+            .readOnly()
+            .initialValue(0)
+            .commit(),
+            VECTOR_UINT32_ELEMENT(expected)
+            .key("dataFormat.inputDataShape")
+            .displayedName("Input data shape")
+            .description(
+                "Image data shape in incoming data (from reader / DAQ). "
+                "Value computed from pixelsX, pixelsY, and memoryCells - "
+                "this slot is just showing you what is currently expected."
+            )
+            .readOnly()
+            .initialValue([])
+            .commit(),
+            VECTOR_UINT32_ELEMENT(expected)
+            .key("dataFormat.outputDataShape")
+            .displayedName("Output data shape")
+            .description(
+                "Image data shape for data output from this device. "
+                "Value computed from pixelsX, pixelsY, and the size of the pulse filter - "
+                "this slot is just showing what is currently expected."
+            )
+            .readOnly()
+            .initialValue([])
+            .commit(),
+        )
+
         preview_schema = Schema()
-        NODE_ELEMENT(preview_schema).key("data").commit()
-        NDARRAY_ELEMENT(preview_schema).key("data.adc").dtype("FLOAT").commit()
-        OUTPUT_CHANNEL(expected).key("preview.outputRaw").dataSchema(
-            preview_schema
-        ).commit()
-        OUTPUT_CHANNEL(expected).key("preview.outputCorrected").dataSchema(
-            preview_schema
-        ).commit()
-        BOOL_ELEMENT(expected).key("preview.enable").displayedName(
-            "Enable preview data generation"
-        ).assignmentOptional().defaultValue(True).reconfigurable().commit()
-        INT32_ELEMENT(expected).key("preview.pulse").displayedName(
-            "Pulse (or stat) for preview"
-        ).description(
-            "If this value is ≥ 0, the corresponding index from data will be "
-            "sliced for the preview. If this value is ≤ 0, preview will be one "
-            "of the following stats:\n"
-            "-1: max\n"
-            "-2: mean\n"
-            "-3: sum\n"
-            "-4: stdev\n"
-            "Max means selecting the pulse with the maximum integrated value. "
-            "The others are computed across all filtered pulses in the train."
-        ).assignmentOptional().defaultValue(
-            0
-        ).reconfigurable().commit()
-        UINT32_ELEMENT(expected).key("preview.trainIdModulo").displayedName(
-            "Train modulo for throttling"
-        ).description(
-            "Preview will only be sent for trains whose ID modulo this number "
-            "is zero. Higher values means fewer preview updates. Should be "
-            "adjusted based on input rate. Keep in mind that the GUI has "
-            "limited refresh rate anyway and that network is precious."
-        ).assignmentOptional().defaultValue(
-            6
-        ).reconfigurable().commit()
-
-        # timer-related settings
-        NODE_ELEMENT(expected).key("performance").displayedName(
-            "Performance measures"
-        ).commit()
-        FLOAT_ELEMENT(expected).key("performance.rateUpdateInterval").displayedName(
-            "Rate update interval"
-        ).description(
-            "Maximum interval (seconds) between updates of the rate. "
-            "Mostly relevant if not rateUpdateOnEachInput or if input is slow."
-        ).assignmentOptional().defaultValue(
-            1
-        ).reconfigurable().commit()
-        FLOAT_ELEMENT(expected).key("performance.rateBufferSpan").displayedName(
-            "Rate measurement buffer span"
-        ).description(
-            "Event buffer timespan (in seconds) for measuring rate"
-        ).assignmentOptional().defaultValue(
-            20
-        ).reconfigurable().commit()
-        BOOL_ELEMENT(expected).key("performance.rateUpdateOnEachInput").displayedName(
-            "Update rate on each input"
-        ).description(
-            "Whether or not to update the device rate for each input "
-            "(otherwise only based on rateUpdateInterval). "
-            "Note that processed trains are always registered - this just "
-            "impacts when the rate is computed based on this."
-        ).assignmentOptional().defaultValue(
-            False
-        ).reconfigurable().commit()
-
-        FLOAT_ELEMENT(expected).key("processingStateTimeout").description(
-            "Timeout after which the device goes from PROCESSING back to ACTIVE "
-            "if no new input is processed"
-        ).assignmentOptional().defaultValue(10).reconfigurable().commit()
-
-        # just measurements and counters to display
-        UINT64_ELEMENT(expected).key("trainId").displayedName("Train ID").description(
-            "ID of latest train processed by this device."
-        ).readOnly().initialValue(0).commit()
-        FLOAT_ELEMENT(expected).key("performance.lastProcessingDuration").displayedName(
-            "Processing time"
-        ).description(
-            "Amount of time spent in processing latest train. "
-            "Time includes generating preview and sending data."
-        ).unit(
-            Unit.SECOND
-        ).metricPrefix(
-            MetricPrefix.MILLI
-        ).readOnly().initialValue(
-            0
-        ).commit()
-        FLOAT_ELEMENT(expected).key("performance.rate").displayedName(
-            "Rate"
-        ).description(
-            "Actual rate with which this device gets / processes / sends trains"
-        ).unit(
-            Unit.HERTZ
-        ).readOnly().initialValue(
-            0
-        ).commit()
-        FLOAT_ELEMENT(expected).key("performance.theoreticalRate").displayedName(
-            "Processing rate (hypothetical)"
-        ).description(
-            "Rate with which this device could hypothetically process trains. "
-            "Based on lastProcessingDuration."
-        ).unit(
-            Unit.HERTZ
-        ).readOnly().initialValue(
-            float("NaN")
-        ).warnLow(
-            10
-        ).info(
-            "Processing not fast enough for full speed"
-        ).needsAcknowledging(
-            False
-        ).commit()
+        (
+            NODE_ELEMENT(expected).key("preview").displayedName("Preview").commit(),
+            NODE_ELEMENT(preview_schema).key("data").commit(),
+            NDARRAY_ELEMENT(preview_schema).key("data.adc").dtype("FLOAT").commit(),
+            OUTPUT_CHANNEL(expected)
+            .key("preview.outputRaw")
+            .dataSchema(preview_schema)
+            .commit(),
+            OUTPUT_CHANNEL(expected)
+            .key("preview.outputCorrected")
+            .dataSchema(preview_schema)
+            .commit(),
+            BOOL_ELEMENT(expected)
+            .key("preview.enable")
+            .displayedName("Enable preview data generation")
+            .assignmentOptional()
+            .defaultValue(True)
+            .reconfigurable()
+            .commit(),
+            INT32_ELEMENT(expected)
+            .key("preview.pulse")
+            .displayedName("Pulse (or stat) for preview")
+            .description(
+                "If this value is ≥ 0, the corresponding index from data will be "
+                "sliced for the preview. If this value is ≤ 0, preview will be one "
+                "of the following stats:\n"
+                "-1: max\n"
+                "-2: mean\n"
+                "-3: sum\n"
+                "-4: stdev\n"
+                "Max means selecting the pulse with the maximum integrated value. "
+                "The others are computed across all filtered pulses in the train."
+            )
+            .assignmentOptional()
+            .defaultValue(0)
+            .reconfigurable()
+            .commit(),
+            UINT32_ELEMENT(expected)
+            .key("preview.trainIdModulo")
+            .displayedName("Train modulo for throttling")
+            .description(
+                "Preview will only be sent for trains whose ID modulo this number "
+                "is zero. Higher values means fewer preview updates. Should be "
+                "adjusted based on input rate. Keep in mind that the GUI has "
+                "limited refresh rate anyway and that network is precious."
+            )
+            .assignmentOptional()
+            .defaultValue(6)
+            .reconfigurable()
+            .commit(),
+        )
+
+        (
+            NODE_ELEMENT(expected)
+            .key("performance")
+            .displayedName("Performance measures")
+            .commit(),
+            FLOAT_ELEMENT(expected)
+            .key("performance.rateUpdateInterval")
+            .displayedName("Rate update interval")
+            .description(
+                "Maximum interval (seconds) between updates of the rate. "
+                "Mostly relevant if not rateUpdateOnEachInput or if input is slow."
+            )
+            .assignmentOptional()
+            .defaultValue(1)
+            .reconfigurable()
+            .commit(),
+            FLOAT_ELEMENT(expected)
+            .key("performance.rateBufferSpan")
+            .displayedName("Rate measurement buffer span")
+            .description("Event buffer timespan (in seconds) for measuring rate")
+            .assignmentOptional()
+            .defaultValue(20)
+            .reconfigurable()
+            .commit(),
+            BOOL_ELEMENT(expected)
+            .key("performance.rateUpdateOnEachInput")
+            .displayedName("Update rate on each input")
+            .description(
+                "Whether or not to update the device rate for each input "
+                "(otherwise only based on rateUpdateInterval). "
+                "Note that processed trains are always registered - this just "
+                "impacts when the rate is computed based on this."
+            )
+            .assignmentOptional()
+            .defaultValue(False)
+            .reconfigurable()
+            .commit(),
+            FLOAT_ELEMENT(expected)
+            .key("processingStateTimeout")
+            .description(
+                "Timeout after which the device goes from PROCESSING back to ACTIVE "
+                "if no new input is processed"
+            )
+            .assignmentOptional()
+            .defaultValue(10)
+            .reconfigurable()
+            .commit(),
+            # just measurements and counters to display
+            UINT64_ELEMENT(expected)
+            .key("trainId")
+            .displayedName("Train ID")
+            .description("ID of latest train processed by this device.")
+            .readOnly()
+            .initialValue(0)
+            .commit(),
+            FLOAT_ELEMENT(expected)
+            .key("performance.lastProcessingDuration")
+            .displayedName("Processing time")
+            .description(
+                "Amount of time spent in processing latest train. "
+                "Time includes generating preview and sending data."
+            )
+            .unit(Unit.SECOND)
+            .metricPrefix(MetricPrefix.MILLI)
+            .readOnly()
+            .initialValue(0)
+            .commit(),
+            FLOAT_ELEMENT(expected)
+            .key("performance.rate")
+            .displayedName("Rate")
+            .description(
+                "Actual rate with which this device gets / processes / sends trains"
+            )
+            .unit(Unit.HERTZ)
+            .readOnly()
+            .initialValue(0)
+            .commit(),
+            FLOAT_ELEMENT(expected)
+            .key("performance.theoreticalRate")
+            .displayedName("Processing rate (hypothetical)")
+            .description(
+                "Rate with which this device could hypothetically process trains. "
+                "Based on lastProcessingDuration."
+            )
+            .unit(Unit.HERTZ)
+            .readOnly()
+            .initialValue(float("NaN"))
+            .warnLow(10)
+            .info("Processing not fast enough for full speed")
+            .needsAcknowledging(False)
+            .commit(),
+        )
 
         # stuff from typical calPy that we don't use right now
         # Included to avoid errors due to unexpected configuration from init device