diff --git a/src/calng/AgipdCorrection.py b/src/calng/AgipdCorrection.py
index 451cd0116c5aad03dafa6eeabc3c87f977443783..c9442d4b0c7bc1ac9e0ac0c330a549eaca17f8e5 100644
--- a/src/calng/AgipdCorrection.py
+++ b/src/calng/AgipdCorrection.py
@@ -31,6 +31,7 @@ class AgipdCorrection(BaseCorrection):
         ("badPixels", CorrectionFlags.BPMASK),
     )
     _gpu_runner_class = AgipdGpuRunner
+    _schema_cache_slots = BaseCorrection._schema_cache_slots | {"sendGainMap"}
 
     @staticmethod
     def expectedParameters(expected):
@@ -118,7 +119,14 @@ class AgipdCorrection(BaseCorrection):
             .assignmentOptional()
             .defaultValue("ADAPTIVE_GAIN")
             .options("ADAPTIVE_GAIN,FIXED_HIGH_GAIN,FIXED_MEDIUM_GAIN,FIXED_LOW_GAIN")
-            .commit()
+            .commit(),
+            BOOL_ELEMENT(expected)
+            .key("sendGainMap")
+            .displayedName("Send gain map on dataOutput")
+            .assignmentOptional()
+            .defaultValue(False)
+            .reconfigurable()
+            .commit(),
         )
         # TODO: hook this up to actual correction done
         # NOTE: wanted to configure this as table, could not make reaonly table with reconfigurable bools in rows
diff --git a/src/calng/agipd_gpu.py b/src/calng/agipd_gpu.py
index 808a9de30ebf384f6912fb7b04c80011ca62e496..60d0cd1c0e0a0c0fb537922817942083d9a5ecd5 100644
--- a/src/calng/agipd_gpu.py
+++ b/src/calng/agipd_gpu.py
@@ -249,6 +249,11 @@ class AgipdGpuRunner(base_gpu.BaseGpuRunner):
             ),
         )
 
+    def get_gain_map(self, out=None):
+        return cupy.ascontiguousarray(
+            cupy.transpose(self.gain_map_gpu, self.output_transpose)
+        ).get(out=out)
+
     def _init_kernels(self):
         kernel_source = self._kernel_template.render(
             {
diff --git a/src/calng/agipd_gpu_kernels.cpp b/src/calng/agipd_gpu_kernels.cpp
index 2d701d981d467ac4d9486114ca93656f47b45d58..2a9d7dc375765ffa903605cce0c948566d734150 100644
--- a/src/calng/agipd_gpu_kernels.cpp
+++ b/src/calng/agipd_gpu_kernels.cpp
@@ -124,6 +124,7 @@ extern "C" {
             if (corr_flags & REL_GAIN_XRAY) {
                 // TODO
                 //corrected *= rel_gain_xray_map[map_index];
+                // TODO: G_gain_value
             }
         }
 
diff --git a/src/calng/base_correction.py b/src/calng/base_correction.py
index 117ebb86a93ed1e13b56638b3461cfbe0bc1ae1f..29451fcae6ec87e92d97d64ec1837a16de50e729 100644
--- a/src/calng/base_correction.py
+++ b/src/calng/base_correction.py
@@ -510,6 +510,8 @@ class BaseCorrection(calibrationBase.CalibrationReceiverBaseDevice):
 
     def _write_combiner_preview(self, data_raw, data_corrected, train_id, source):
         # TODO: take into account updated pulse table after pulse filter
+        # TODO: send as ImageData (requires updated assembler)
+        # TODO: allow sending *all* frames for commissioning (request: Jola)
         preview_hash = Hash()
         preview_hash.set("image.trainId", train_id)
         preview_hash.set("image.pulseId", self._schema_cache["preview.pulse"])
diff --git a/src/calng/base_gpu.py b/src/calng/base_gpu.py
index fc716ee3e3d4d87dd4655a8d537493f86c5a4dbb..1ee0a49df4cce5129655adde59fad164f4a6d743 100644
--- a/src/calng/base_gpu.py
+++ b/src/calng/base_gpu.py
@@ -137,8 +137,8 @@ class BaseGpuRunner:
     def compute_preview(self, preview_index):
         """Generate single slice or reduction preview of raw and corrected data
 
-        Special values of preview_index are -1 for max (select max integrated intensity
-        frame), -2 for mean, -3 for sum, and -4 for stdev (across cells).
+        Special values of preview_index are -1 for max, -2 for mean, -3 for sum, and
+        -4 for stdev (across cells).
 
         Note that preview_index is taken from data without checking cell table.
         Caller has to figure out which index along memory cell dimension they
@@ -163,14 +163,13 @@ class BaseGpuRunner:
             # TODO: change axis order when moving reshape to after correction
             image_data[preview_index].astype(np.float32).get(out=output_buffer)
         elif preview_index == -1:
-            # TODO: select argmax independently for raw and corrected?
-            # TODO: send frame sums somewhere to compute global max frame
-            max_index = cupy.argmax(
-                cupy.sum(image_data, axis=(1, 2), dtype=cupy.float32)
-            )
-            image_data[max_index].astype(np.float32).get(out=output_buffer)
+            # TODO: confirm that max is pixel and not integrated intensity
+            # separate from next case because dtype not applicable here
+            cupy.max(image_data, axis=0).astype(cupy.float32).get(out=output_buffer)
         elif preview_index in (-2, -3, -4):
-            stat_fun = {-2: cupy.mean, -3: cupy.sum, -4: cupy.std}[preview_index]
+            stat_fun = {-1: cupy.max, -2: cupy.mean, -3: cupy.sum, -4: cupy.std}[
+                preview_index
+            ]
             stat_fun(image_data, axis=0, dtype=cupy.float32).get(out=output_buffer)
 
         return self.preview_raw, self.preview_corrected
diff --git a/src/tests/test_dssc_kernels.py b/src/tests/test_dssc_kernels.py
index 1ed780620c06583a4935b83e924b9e0ae89e475c..3790c56095b8d0dbf67318490db93e866ea97b9a 100644
--- a/src/tests/test_dssc_kernels.py
+++ b/src/tests/test_dssc_kernels.py
@@ -92,22 +92,15 @@ def test_preview_slice():
 
 
 def test_preview_max():
-    # can it find max intensity frame?
     # note: in case correction failed, still test this separately
     kernel_runner.load_data(raw_data)
    kernel_runner.processed_data_gpu.set(corrected_data)
     preview_raw, preview_corrected = kernel_runner.compute_preview(-1)
     assert np.allclose(
-        preview_raw,
-        raw_data[np.argmax(np.sum(raw_data, axis=(1, 2), dtype=np.float32))]
-        .astype(np.float32)
-        .transpose(),
+        preview_raw, np.max(raw_data, axis=0).astype(np.float32).transpose()
     )
     assert np.allclose(
-        preview_corrected,
-        corrected_data[np.argmax(np.sum(corrected_data, axis=(1, 2), dtype=np.float32))]
-        .astype(np.float32)
-        .transpose(),
+        preview_corrected, np.max(corrected_data, axis=0).astype(np.float32).transpose()
     )
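
Note on the base_gpu.py preview change: preview_index == -1 previously returned the single frame with the highest integrated intensity, and now returns the per-pixel maximum over the memory cell axis (the DSSC test is updated to match). The snippet below is a minimal plain-NumPy illustration of the behavioral difference, with an arbitrary stand-in array shape; it is not part of the patch.

    import numpy as np

    rng = np.random.default_rng(seed=0)
    image_data = rng.random((16, 512, 128), dtype=np.float32)  # (cells, slow, fast)

    # old -1 preview: the one frame with the highest integrated intensity
    frame_max = image_data[np.argmax(np.sum(image_data, axis=(1, 2), dtype=np.float32))]

    # new -1 preview: per-pixel maximum across the memory cell axis
    pixel_max = np.max(image_data, axis=0)

    assert frame_max.shape == pixel_max.shape == (512, 128)
    print(np.allclose(frame_max, pixel_max))  # generally False: different reductions

The -1 case stays in its own branch rather than going through stat_fun because, as the added comment notes, cupy.max does not accept the dtype keyword that the mean/sum/std reductions use.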
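
Note on get_gain_map in agipd_gpu.py: it follows the same device-to-host pattern as the existing output getters: transpose on the GPU, force a contiguous layout, then copy into a caller-provided host buffer via .get(out=...). The sketch below only restates that transfer pattern; the shapes, dtype, and local variable names are made-up stand-ins for the runner's self.gain_map_gpu and self.output_transpose, and how the device uses the new sendGainMap flag to put this on dataOutput is not shown in this patch.

    import cupy
    import numpy as np

    output_transpose = (2, 1, 0)  # stand-in for the runner's output_transpose
    gain_map_gpu = cupy.zeros((352, 512, 128), dtype=cupy.float32)  # stand-in device array

    # the host buffer must already match the transposed shape and dtype for .get(out=...)
    gain_map_host = np.empty((128, 512, 352), dtype=np.float32)
    cupy.ascontiguousarray(
        cupy.transpose(gain_map_gpu, output_transpose)
    ).get(out=gain_map_host)

Presumably the device side would check self._schema_cache["sendGainMap"] (the slot added to _schema_cache_slots) before paying for this extra transfer on every train.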