diff --git a/src/tests/test_agipd_kernels.py b/src/tests/test_agipd_kernels.py
index dd1fef68eb5d656d6bcedad283fb62437e5bf8d1..4a5aa10db181738274ebbd2c21eb90e44c2b39c1 100644
--- a/src/tests/test_agipd_kernels.py
+++ b/src/tests/test_agipd_kernels.py
@@ -37,7 +37,6 @@ kernel_runner = agipd_gpu.AgipdGpuRunner(
     constant_memory_cells=memory_cells,
     input_data_dtype=input_dtype,
     output_data_dtype=output_dtype,
-    output_gain_map=True,
 )
 
 
diff --git a/src/tests/test_calcat_utils.py b/src/tests/test_calcat_utils.py
index 69641d2cc380e60a9d2a62ede006ec728ca20dd2..81a866a55c3abdc42c9083086f3dcca3739bb218 100644
--- a/src/tests/test_calcat_utils.py
+++ b/src/tests/test_calcat_utils.py
@@ -2,6 +2,7 @@ import pathlib
 import timeit
 
 from calng import calcat_utils
+from calng.utils import Stopwatch
 from karabo.bound import Hash, Schema
 
 # TODO: secrets management
@@ -12,33 +13,29 @@ _test_calcat_secrets_fn = _test_dir / "calibration-client-secrets.json"
 class DummyLogger:
     DEBUG = print
     INFO = print
+    WARN = print
 
 
-class Stopwatch:
-    def __enter__(self):
-        self.start_ts = timeit.default_timer()
-        self.running = True
-        return self
+class DummyBaseDevice:
+    log = DummyLogger()
 
-    def __exit__(self, t, v, tb):
-        self.stop_ts = timeit.default_timer()
-        self.running = False
+    def log_status_info(self, msg):
+        self.log.INFO(msg)
+
+    def log_status_warn(self, msg):
+        self.log.WARN(msg)
+
+    def get(self, key):
+        return self.schema.get(key)
 
-    @property
-    def elapsed(self):
-        if self.running:
-            return timeit.default_timer() - self.start_ts
-        else:
-            return self.stop_ts - self.start_ts
+    def set(self, key, value):
+        print(f'Would set "{key}" = {value}')
 
 
 # TODO: consider testing by attaching to real karabo.bound.PythonDevice
-class DummyAgipdDevice:
+class DummyAgipdDevice(DummyBaseDevice):
     device_class_schema = Schema()
-    managed_keys = []
-
-    def log_status_info(self, msg):
-        print(msg)
+    managed_keys = set()
 
     @staticmethod
     def expectedParameters(expected):
@@ -47,7 +44,6 @@ class DummyAgipdDevice:
         )
 
     def __init__(self, config):
-        self.log = DummyLogger()
         self.schema = config
         self.calibration_constant_manager = calcat_utils.AgipdCalcatFriend(
             self,
@@ -55,23 +51,15 @@ class DummyAgipdDevice:
         )
         print(self.managed_keys)
 
-    def get(self, key):
-        return self.schema.get(key)
-
-    def set(self, key, value):
-        print(f'Would set "{key}" = {value}')
-
 
 DummyAgipdDevice.expectedParameters(DummyAgipdDevice.device_class_schema)
 print(DummyAgipdDevice.device_class_schema)
 print(DummyAgipdDevice.managed_keys)
 
-class DummyDsscDevice:
-    device_class_schema = Schema()
-    managed_keys = []
 
-    def log_status_info(self, msg):
-        print(msg)
+class DummyDsscDevice(DummyBaseDevice):
+    device_class_schema = Schema()
+    managed_keys = set()
 
     @staticmethod
     def expectedParameters(expected):
@@ -79,7 +67,6 @@ class DummyDsscDevice:
         calcat_utils.DsscCalcatFriend.add_schema(expected, DummyDsscDevice.managed_keys)
 
     def __init__(self, config):
-        self.log = DummyLogger()
         # TODO: check config against schema (as Karabo would)
         self.schema = config
         self.calibration_constant_manager = calcat_utils.DsscCalcatFriend(
@@ -87,12 +74,6 @@ class DummyDsscDevice:
             _test_calcat_secrets_fn,
         )
 
-    def get(self, key):
-        return self.schema.get(key)
-
-    def set(self, key, value):
-        print(f'Would set "{key}" = {value}')
-
 
 DummyDsscDevice.expectedParameters(DummyDsscDevice.device_class_schema)
 
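
Note: the Stopwatch context manager deleted above is now imported from calng.utils (see the new import at the top of this file). For reference, a minimal sketch of such a timer, mirroring the removed implementation; the actual calng.utils.Stopwatch may differ in detail:

import timeit


class Stopwatch:
    """Context manager timing the enclosed block via timeit.default_timer()."""

    def __enter__(self):
        self.start_ts = timeit.default_timer()
        self.running = True
        return self

    def __exit__(self, t, v, tb):
        self.stop_ts = timeit.default_timer()
        self.running = False

    @property
    def elapsed(self):
        # While running, report time since entry; afterwards, the final duration.
        if self.running:
            return timeit.default_timer() - self.start_ts
        else:
            return self.stop_ts - self.start_ts
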
diff --git a/src/tests/test_dssc_kernels.py b/src/tests/test_dssc_kernels.py
index b8e90ceea635f8ad198e6ac8136a2457a21f7248..ecd7c0f0c00c54be0b066eb9b24be520bac2aa7e 100644
--- a/src/tests/test_dssc_kernels.py
+++ b/src/tests/test_dssc_kernels.py
@@ -74,20 +74,22 @@ def test_correct_oob_cells():
 
 def test_reshape():
     kernel_runner.processed_data_gpu.set(corrected_data)
-    assert np.allclose(kernel_runner.reshape(output_order="xyc"), corrected_data.transpose())
+    assert np.allclose(
+        kernel_runner.reshape(output_order="xyc"), corrected_data.transpose()
+    )
 
 
 def test_preview_slice():
     kernel_runner.load_data(raw_data)
     kernel_runner.processed_data_gpu.set(corrected_data)
-    preview_raw, preview_corrected = kernel_runner.compute_preview(42)
+    preview_raw, preview_corrected = kernel_runner.compute_previews(42)
     assert np.allclose(
         preview_raw,
-        raw_data[42].astype(np.float32).transpose(),
+        raw_data[42].astype(np.float32),
     )
     assert np.allclose(
         preview_corrected,
-        corrected_data[42].astype(np.float32).transpose(),
+        corrected_data[42].astype(np.float32),
     )
 
 
@@ -95,53 +97,45 @@ def test_preview_max():
     # note: in case correction failed, still test this separately
     kernel_runner.load_data(raw_data)
     kernel_runner.processed_data_gpu.set(corrected_data)
-    preview_raw, preview_corrected = kernel_runner.compute_preview(-1)
+    preview_raw, preview_corrected = kernel_runner.compute_previews(-1)
+    assert np.allclose(preview_raw, np.max(raw_data, axis=0).astype(np.float32))
     assert np.allclose(
-        preview_raw, np.max(raw_data, axis=0).astype(np.float32).transpose()
-    )
-    assert np.allclose(
-        preview_corrected, np.max(corrected_data, axis=0).astype(np.float32).transpose()
+        preview_corrected, np.max(corrected_data, axis=0).astype(np.float32)
     )
 
 
 def test_preview_mean():
     kernel_runner.load_data(raw_data)
     kernel_runner.processed_data_gpu.set(corrected_data)
-    preview_raw, preview_corrected = kernel_runner.compute_preview(-2)
-    assert np.allclose(
-        preview_raw, np.mean(raw_data, axis=0, dtype=np.float32).transpose()
-    )
+    preview_raw, preview_corrected = kernel_runner.compute_previews(-2)
+    assert np.allclose(preview_raw, np.nanmean(raw_data, axis=0, dtype=np.float32))
     assert np.allclose(
-        preview_corrected, np.mean(corrected_data, axis=0, dtype=np.float32).transpose()
+        preview_corrected, np.nanmean(corrected_data, axis=0, dtype=np.float32)
     )
 
 
 def test_preview_sum():
     kernel_runner.load_data(raw_data)
     kernel_runner.processed_data_gpu.set(corrected_data)
-    preview_raw, preview_corrected = kernel_runner.compute_preview(-3)
-    assert np.allclose(
-        preview_raw, np.sum(raw_data, axis=0, dtype=np.float32).transpose()
-    )
+    preview_raw, preview_corrected = kernel_runner.compute_previews(-3)
+    assert np.allclose(preview_raw, np.nansum(raw_data, axis=0, dtype=np.float32))
     assert np.allclose(
-        preview_corrected, np.sum(corrected_data, axis=0, dtype=np.float32).transpose()
+        preview_corrected, np.nansum(corrected_data, axis=0, dtype=np.float32)
     )
 
 
 def test_preview_std():
     kernel_runner.load_data(raw_data)
     kernel_runner.processed_data_gpu.set(corrected_data)
-    preview_raw, preview_corrected = kernel_runner.compute_preview(-4)
-    assert np.allclose(
-        preview_raw, np.std(raw_data, axis=0, dtype=np.float32).transpose()
-    )
+    preview_raw, preview_corrected = kernel_runner.compute_previews(-4)
+    assert np.allclose(preview_raw, np.nanstd(raw_data, axis=0, dtype=np.float32))
     assert np.allclose(
-        preview_corrected, np.std(corrected_data, axis=0, dtype=np.float32).transpose()
+        preview_corrected, np.nanstd(corrected_data, axis=0, dtype=np.float32)
     )
 
 
 def test_preview_valid_index():
     with pytest.raises(ValueError):
-        kernel_runner.compute_preview(-5)
+        kernel_runner.compute_previews(-5)
     with pytest.raises(ValueError):
-        kernel_runner.compute_preview(memory_cells)
+        kernel_runner.compute_previews(memory_cells)
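
Note: the four reduction tests above (max, mean, sum, std) follow the same pattern and differ only in the preview index passed to compute_previews and the expected NumPy reduction. A possible consolidation with pytest.mark.parametrize is sketched below; it reuses the module-level kernel_runner, raw_data and corrected_data objects from the tests above, and the index-to-reduction mapping is inferred from those tests rather than from the calng implementation itself:

import numpy as np
import pytest

# Expected reduction over the memory-cell axis for each special preview index,
# as exercised by the tests above (-1: max, -2: mean, -3: sum, -4: std).
_reductions = {
    -1: lambda data: np.max(data, axis=0).astype(np.float32),
    -2: lambda data: np.nanmean(data, axis=0, dtype=np.float32),
    -3: lambda data: np.nansum(data, axis=0, dtype=np.float32),
    -4: lambda data: np.nanstd(data, axis=0, dtype=np.float32),
}


@pytest.mark.parametrize("preview_index", sorted(_reductions))
def test_preview_reductions(preview_index):
    kernel_runner.load_data(raw_data)
    kernel_runner.processed_data_gpu.set(corrected_data)
    preview_raw, preview_corrected = kernel_runner.compute_previews(preview_index)
    expected = _reductions[preview_index]
    assert np.allclose(preview_raw, expected(raw_data))
    assert np.allclose(preview_corrected, expected(corrected_data))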