diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c4d3d5ba6246b038763fc27a2199497d8682d109..7ae2b28ae72dca0aff0b83c2059f13f209dde184 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,6 +1,7 @@
 stages:
   - check
   - test
+  - automated_test
 
 .before_script: &before_script
   before_script:
@@ -45,11 +46,29 @@ pytest:
     - export LANG=C  # Hopefully detect anything relying on locale
     - python3 -m pip install ".[test]"
     - python3 -m pytest --color yes --verbose --cov=cal_tools --cov=xfel_calibrate
-#  Nope... https://docs.gitlab.com/12.10/ee/user/project/merge_requests/test_coverage_visualization.html#enabling-the-feature
-#    - coverage xml
-#  artifacts:
-#    reports:
-#      cobertura: coverage.xml
+  #  Nope... https://docs.gitlab.com/12.10/ee/user/project/merge_requests/test_coverage_visualization.html#enabling-the-feature
+  #    - coverage xml
+  #  artifacts:
+  #    reports:
+  #      cobertura: coverage.xml
+
+automated_test:
+  variables:
+    OUTPUT: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
+    DETECTORS: all
+    CALIBRATION: all
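+    # These defaults (and REFERENCE, echoed in the script below) can be
+    # overridden with custom variables when this manual job is started
+    # from the GitLab UI.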
+  stage: automated_test
+  only: [merge_requests]
+  when: manual
+  allow_failure: false
+  <<: *before_script
+  script:
+    - export LANG=C  # Hopefully detect anything relying on locale
+    - python3 -m pip install ".[automated_test]"
+    - echo "Running automated test. This can take sometime to finish depending on the test data."
+    - echo "Given variables are REFERENCE=$REFERENCE, OUTPUT=$OUTPUT, DETECTORS=$DETECTORS, CALIBRATION=$CALIBRATION"
+    - python3 -m pytest ./tests/test_reference_runs --color yes --verbose --release-test --reference-folder /gpfs/exfel/d/cal_tst/reference_folder --out-folder /gpfs/exfel/data/scratch/xcaltst/test/$OUTPUT  --detectors $DETECTORS --calibration $CALIBRATION
+  timeout: 24 hours
 
 cython-editable-install-test:
   stage: test
@@ -57,4 +76,4 @@ cython-editable-install-test:
   <<: *before_script
   script:
     - python3 -m pip install -e ".[test]"
-    - python3 -m pytest --color yes --verbose ./tests/test_cythonalgs.py 
+    - python3 -m pytest --color yes --verbose ./tests/test_cythonalgs.py
diff --git a/README.rst b/README.rst
index e0217e1f0369da794295b60e73bc44c02ba8a68f..98909b690a9239fbf0077e4ecc688522b8811a83 100644
--- a/README.rst
+++ b/README.rst
@@ -215,7 +215,7 @@ CORRECT<dict> {'cmd': 'python -m xfel_calibrate.calibrate {detector} CORRECT '
         '--report-to '
         '/gpfs/exfel/exp/{instrument}/{cycle}/p{proposal}/usr/Reports/{runs}/{det_instance}_{action}_{proposal}_{runs}_{time_stamp} '
         '--cal-db-timeout 300000 --cal-db-interface '
-        'tcp://max-exfl016:8015#8044',
+        'tcp://max-exfl-cal001:8015#8044',
  'in-folder': '/gpfs/exfel/exp/{instrument}/{cycle}/p{proposal}/raw',
  'out-folder': '/gpfs/exfel/d/proc/{instrument}/{cycle}/p{proposal}/{run}',
  'sched-prio': 80}
@@ -225,7 +225,7 @@ DARK<dict> {'cmd': 'python -m xfel_calibrate.calibrate {detector} DARK --concurr
         '{action}_{instrument}_{detector}_{cycle}_p{proposal}_{runs} '
         '--report-to '
         '/gpfs/exfel/d/cal/caldb_store/xfel/reports/{instrument}/{det_instance}/{action}/{action}_{proposal}_{runs}_{time_stamp} '
-        '--cal-db-interface tcp://max-exfl016:8015#8044 --db-output',
+        '--cal-db-interface tcp://max-exfl-cal001:8015#8044 --db-output',
  'in-folder': '/gpfs/exfel/exp/{instrument}/{cycle}/p{proposal}/raw',
  'out-folder': '/gpfs/exfel/u/usr/{instrument}/{cycle}/p{proposal}/dark/runs_{runs}',
  'sched-prio': 10}
@@ -473,7 +473,7 @@ https://git.xfel.eu/gitlab/detectors/pycalibration repository, so now
 
 .. code::
 
-  [xcaltst@max-exfl017 tmp]$ git clone ssh://git@git.xfel.eu:10022/detectors/pycalibration.git
+  [xcaltst@max-exfl-cal002 tmp]$ git clone ssh://git@git.xfel.eu:10022/detectors/pycalibration.git
   Cloning into 'pycalibration'...
   remote: Enumerating objects: 9414, done.
   remote: Counting objects: 100% (9414/9414), done.
diff --git a/docs/css/custom.css b/docs/css/custom.css
index 2a2f37f47f3aa6a3008c2ec8bd53f25e10578671..93d66f9c0d3017e14cfb7983577493aeae5eb142 100644
--- a/docs/css/custom.css
+++ b/docs/css/custom.css
@@ -52,7 +52,7 @@ div.autodoc-docstring {
   --md-primary-bg-color--light:#B2B2B2;
   --md-footer-bg-color:        #000020;
 
-  --md-accent-fg-color:              #152066;
+  --md-accent-fg-color:              #fcda9d;
   --md-accent-fg-color--transparent: #3c3b72;
   --md-accent-bg-color:              #ffffff;
   --md-accent-bg-color--light:       #ffffff;
@@ -83,5 +83,5 @@ div.autodoc-docstring {
   --md-footer-bg-color:                hsla(230, 9%, 13%, 0.87); 
   --md-footer-bg-color--dark:          hsla(232, 15%, 10%, 1);
 
-  --md-typeset-a-color: #e99a25;
+  --md-typeset-a-color: #f39200;
 } 
\ No newline at end of file
diff --git a/docs/development/testing_pipeline.md b/docs/development/testing_pipeline.md
index ef9cfa9e190492b9b6924aed25530a3e405c70af..174d2d7a1db486637dc318e7687f4ee22ab83aee 100644
--- a/docs/development/testing_pipeline.md
+++ b/docs/development/testing_pipeline.md
@@ -15,7 +15,6 @@ affect the produced data quality.
    2. Validate the number of HDF5 files against the number of HDF5 files in the reference folder.
 3. Validate the numerical values of the processed data against the referenced data.
 
-
 These tests are meant to run on all detector calibrations before any release, as well as per branch on the selected detector/calibration affected by the branch's changes.
 
 ## Current state
@@ -42,7 +41,14 @@ These tests are meant to run on all detector calibrations before any release. As
   ```
 
 - Tests are triggered using CLI:
-  - `pytest tests/test_reference_runs/test_pre_deployment.py --release-test --reference-folder <reference-folder-path> --out-folder <out-folder-path>`
+
+   ```bash
+   pytest tests/test_reference_runs/test_pre_deployment.py \
+       --release-test \
+       --reference-folder <reference-folder-path> \
+       --out-folder <out-folder-path>
+   ```
+
   - Arguments:
     - required arguments:
       - release-test: needed to trigger the automated test; this flag exists so the test is not triggered as part of the regular GitLab CI.
@@ -76,10 +82,14 @@ Automating this test can solve the dependence on only one user to run the tests
 It is preferred to run this automated test in a common place that is accessed by the calibration team to have more operators/monitors. This would result in a more active approach in solving any problem related to the testing pipeline or the tested detector calibrations.
 Moreover exposing the running test would help in collecting more ideas and efforts in improving the testing pipeline.
 
-## Automating triggering the testing pipeline.
+## Automating triggering the testing pipeline
 
 To automate the triggering:
-  - Decide on a max-node to run the tests.
-    - Currently the tests are manually triggered on a selected node and after the calibration execution is done all checks and validations are done on this selected node.
-    - Create a cron job to schedule the automatic triggering for the tests.
-    - Keep a logging format as a reporting tool for the test results.
+
+- Decide on a max-node to run the tests.
+
+  - Currently the tests are triggered manually on a selected node; once the calibration execution is done, all checks and validations run on that same node.
+
+  - Create a cron job to schedule the automatic triggering of the tests (see the sketch after this list).
+
+  - Keep a consistent logging format as a reporting tool for the test results.
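+
+A minimal sketch of such a cron entry, assuming the repository is checked out at `/path/to/pycalibration`; the schedule, output folder name, and log path are illustrative placeholders, not the deployed configuration:
+
+```bash
+# Run the reference tests every Sunday at 02:00 on the selected max-node.
+0 2 * * 0 cd /path/to/pycalibration && python3 -m pytest ./tests/test_reference_runs --release-test --reference-folder /gpfs/exfel/d/cal_tst/reference_folder --out-folder /gpfs/exfel/data/scratch/xcaltst/test/weekly --detectors all --calibration all >> $HOME/automated_test.log 2>&1
+```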
diff --git a/docs/references/changelog.md b/docs/references/changelog.md
index a570bb140f4287f4a1424d8c3b01d7dd65090254..e7cece2bdc49fc0083deb1c908b2f6ff43bb6250 100644
--- a/docs/references/changelog.md
+++ b/docs/references/changelog.md
@@ -1,30 +1,139 @@
 # Release Notes
 
+## 3.11.5
+- Update CalParrot==0.3 and EXtra-data==1.15.1 dependencies
+- [DSSC][Dark][Correct] No longer restrict memory cells to multiples of 100 and add a lower deviation for the memory-cells parameter condition.
+- [Webservice] Catch errors on failure to launch dark processing
+- Add script to update dark run status in myMdC
+- [Epix100][Correct] Calcat error when no gain is retrieved
+- [REMI] Disable trailing trigger by default
+
+## 3.11.4
+- [Jungfrau][Correct] Force fixed gain for JF data in burst mode
+- [Jungfrau][Correct] Force replacement for gain value in Jungfrau correction
+- [DSSC] Allow 900 memory cells for DSSC darks to work around appearance of cell 810
+- [Jungfrau][Dark] Reflect WRONG_GAIN_VALUE over a pixel in all gain stages for the bad-pixels map
+- [AGIPD][Dark] Add timings
+- [Jungfrau][Correct] New A1256 JF Strixel
+- [Webservice] Add JUNGF and PEP 8 on the line
+
+## 3.11.3
+- [AGIPD][LPD][DARK] Show table for bad pixels bitmaps
+- [AGIPD][CORRECT] Process all AGIPD trains if the PPU device is missing or if no trigger
+- [AGIPD][DARK] Sort dark runs
+- [AGIPD][DARK] Improvements for reading conditions by creating a new data class for multiple runs
+- [AGIPD][FF] Fixing FF summary performance plots
+
+- [Jungfrau][Correct] Add thresholding for ROI projections
+- [Jungfrau][Correct][Dark] Fix manual edit for operating conditions
+- [Jungfrau][DARK] Validate and reorder dark runs before processing
+- [EPIX][FF] ePixFF characterization
+
+- [REMI] Add support for virtual trailing trigger
+
+- Fix manually submitting the confirmation
+- Move some logs to DEBUG and extend the report sleep
+- Expose --blc-stripes to update_config.py and fix old parameter names
+
+## 3.11.2
+- Operational release for SPB to support configurable rounding thresholds.
+
+## 3.11.1
+- [AGIPD][CORRECT] Use calcat_interface and remove precorrection notebook
+- [EPIX100] Feat: Compliance with update to receiver device
+- [REMI] Various fixes and improvements for quad DLDs
+- [REMI] Fix missing re-allocation of trigger array with neither FEL nor PPL
+- [Tests] Fix: Accept uppercase calibration type
+- [Test] Find difference by default
+- Clearer error when xfel-calibrate would run no jobs
+- Detect cycle automatically in update_config script
+- Fix link to CalCat
+
+## 3.11.0
+
+- [AGIPD][Correct] Handle selecting multiple trains per PPU trigger
+- [AGIPD][Dark] Fix: Skip corrupted frame from dark processing
+- [LPD1M] Automatically decide whether to inject & use memory cell order
+- [LPD1M][Dark] Use EXtra-data to create darks from >1 sequence file
+- [LPD1M][Correct] Use parameter names instead of IDs to find constants
+- [LPD1M][Correct] Using CALCAT interface
+- [LPD1M][Correct] Fix: Constant type conversion
+- [LPD1M][Correct] Use the fragment file and remove the precorrection notebook
+- [LPD-Mini] Rework cell order condition to match LPD-1M again
+- [JUNGFRAU][pnCCD][ePix100] Feat: new method to display CCV metadata in reports
+- [JUNGFRAU][CORRECT] Add fragment file and remove precorrection notebook
+- [EPIX][DARK] Mark dead pixels as Bad Pixels
+- [EPIX][CORR] Optimize histograms and plots
+- [GH2][Correct] Move false warning and disable gain correction as printed.
+- [GH2][Correct] Remove the precorrection notebook and add fragment
+- [TIMEPIX] Add select parameters to update_config
+- [TIMEPIX] Fix types of notebook arguments
+- [xfel-calibrate] Fix: Break the line properly into latex when the next line starts with `_`
+- [Webservice] Don't mark jobs as finished just because they disappear from `squeue` output
+- [Webservice] Use status AW in myMdC if correction failed for some detectors in a run
+- Add a pytest to run a dict of CALLAB test runs before releases
+- Look up CCVs using parameter_name in place of parameter_id
+- Replace `max-exfl016` and `max-exfl017` with `max-exfl-cal001` and `max-exfl-cal002`, respectively.
+- Make metadata directory name match report filename
+- Add reorder_axes function
+
+## 3.10.3
+
+- [LPD][Correct] Harden against empty sequence sets with train-on-demand
+- [JF][correct] Add missing gain mode parameter
+- [Timepix3] Add centroiding notebook
+
+## 3.10.2
+
+- [PNCCD][CORRECT] Fix: Skip error for missing gain
+- [PNCCD][CORRECT] Fix: Hack to handle wrong ctrl bias voltage values in p002857
+
+- [LPD][Correct] Fix axis order for LPD-1M RelativeGain constant
+
+- [LPDMini][Dark] Add only number of available data trains into data_samples
+- [LPDMini][Dark] Fix first notebook cell to execute CL through the webservice
+- [LPDMini] Feat: Inject gain constants notebook
+
+- [Jungfrau] Workaround for `SPB_CFEL_JF1M` as the modules start with `09` not `01`
 
+- Fix update_config to work with non-AGIPD and add REMI
 
-## 3.10.0
+## 3.10.1
+
+- [JUNGFRAU][CORRECT] Using calcat interface
+- [JUNGFRAU][CORRECT][DARK] Extend accepted detectors based on substrings of karabo_id
 
-- [[ePix100][Correct] Avoid including histogram calculator with empty array](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/834)
-- [[ePix100][Correct] Remove pre notebook](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/821)
+- [LPD Mini][CORRECT][DARK] Initial work on LPD Mini notebooks
+- [PNCCD][CORRECT] Avoid raising a CalCat error while retrieving metadata for missing gain constant from DB
+
+## 3.10.0
 
-- [[pnCCD][Correct] Using calcat interface](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/776)
-- [[pnCCD][Correct] Record fragment file and remove pre correction notebook](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/828)
-- [[ePix100][pnCCD][Correct] Display creation time for retrieved constants](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/835)
+- [ePix100][Correct] Avoid including histogram calculator with empty array
+- [ePix100][Correct] Remove pre notebook
 
-- [[AGIPD][Correct] error out only if all sources are empty](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/817)
-- [[AGIPD][Correct] exit notebook if no correction files are found for the selected sequence](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/818)
+- [pnCCD][Correct] Using calcat interface
+- [pnCCD][Correct] Record fragment file and remove pre correction notebook
+- [ePix100][pnCCD][Correct] Display creation time for retrieved constants
 
-- [[DSSC][DARK] Group all slow data to the same aggregator](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/819)
+- [AGIPD][Correct] error out only if all sources are empty
+- [AGIPD][Correct] exit notebook if no correction files are found for the selected sequence
 
-- [[JUNGFRAU][Correct] Use DataCollection.from_paths for reading JF CORR files for plots](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/812)
-- [Support for saving metadata fragments & merging into calibration_metadata.yml](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/781)
+- [DSSC][DARK] Group all slow data to the same aggregator
 
-- [[REMI] Save pulse amplitudes during discrimination](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/822)
-- [[REMI] Make plots robust against no edges or no hits in data](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/827)
+- [JUNGFRAU][Correct] Use DataCollection.from_paths for reading JF CORR files for plots
+- Support for saving metadata fragments & merging into calibration_metadata.yml
 
-- [[Webservice] Add script to check run in webservice DB](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/831)
-- [[Webservice] Fix database lock timeouts](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/832)
+- [REMI] Save pulse amplitudes during discrimination
+- [REMI] Make plots robust against no edges or no hits in data
 
+- [Webservice] Add script to check run in webservice DB
+- [Webservice] Fix database lock timeouts
 
 ## 3.9.2
 
@@ -38,25 +147,21 @@
 
 ## 3.9.0
 
-- [[Gotthard2][CORRECT]CALCAT interface.](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/778)
-- [[ePix100][CORRECT]CALCAT interface.](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/777)
-- [[pnCCD][CORRECT]Use `DataFile` to store aligned corrected data.](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/788)
-- [[Gotthard2][CORRECT]Use `DataFile` to store aligned corrected data.](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/804)
-- [[ePix100][CORRECT]Use `DataFile` to store aligned corrected data.](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/804)
+- [Gotthard2][CORRECT]CALCAT interface.
+- [ePix100][CORRECT]CALCAT interface.
+- [pnCCD][CORRECT]Use `DataFile` to store aligned corrected data.
+- [Gotthard2][CORRECT]Use `DataFile` to store aligned corrected data.
+- [ePix100][CORRECT]Use `DataFile` to store aligned corrected data.
 
 
 - [AGIPD][SlopesFF][CORRECT]Add the deviation for all possible memory cells and enable correction using
 these FF constants with more memory cells.
   - https://git.xfel.eu/calibration/pycalibration/-/merge_requests/806
   - https://git.xfel.eu/calibration/pycalibration/-/merge_requests/613
-- [[AGIPD][CORRECT]Fix checking ccv_variant condition for AGIPD.](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/803)
-
-
-- [[DSSC][Dark]Remove unused instrument parameter from DSSC dark notebook](
-  https://git.xfel.eu/calibration/pycalibration/-/merge_requests/808)
-
-- [Update `nbparameterise` to 0.6.](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/805)
-- [Update `EXtra-redu` to version 0.0.7](https://git.xfel.eu/calibration/pycalibration/-/merge_requests/802)
+- [AGIPD][CORRECT]Fix checking ccv_variant condition for AGIPD.
+- [DSSC][Dark]Remove unused instrument parameter from DSSC dark notebook
+- Update `nbparameterise` to 0.6.
+- Update `EXtra-redu` to version 0.0.7
 
 ## 3.8.1
 
diff --git a/mkdocs.yml b/mkdocs.yml
index 32b2e0dfcdff3bf614cff61288d821202bfd6d4b..5b08073c2798d551768bae899135f3b0f0fe69b1 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -89,7 +89,9 @@ nav:
       - CALCAT: operation/calibration_database.md
       - myMDC: operation/myMDC.md
       - Available Calibration notebooks: operation/available_notebooks.md
-      - Calibration webservice: operation/webservice.md
+      - Calibration webservice:
+        - The webservice: operation/webservice.md
+        - Calibration Configuration: operation/calibration_configurations.md
       - Troubleshooting:
         - Correcting detector RAW data: operation/troubleshooting_correction.md
         - Calibration constant generation: operation/troubleshooting_calibration_generation.md
@@ -98,7 +100,7 @@ nav:
       - Workflow: development/workflow.md
       - How to write a notebook: development/how_to_write_xfel_calibrate_notebook_NBC.md
       - Configuration: development/configuration.md
-      - Automated tests: development/testing_pipeline
+      - Automated tests: development/testing_pipeline.md
     - Code Reference: reference/
     - Reference:
       - FAQ: references/faq.md
diff --git a/notebooks/AGIPD/AGIPD_Characterize_Gain_Combine_NBC.ipynb b/notebooks/AGIPD/AGIPD_Characterize_Gain_Combine_NBC.ipynb
index 253789c14f642f41c4fe62ef44f8b44533ebf041..0c699d8f70e8f205887704eba2d796083657a2de 100644
--- a/notebooks/AGIPD/AGIPD_Characterize_Gain_Combine_NBC.ipynb
+++ b/notebooks/AGIPD/AGIPD_Characterize_Gain_Combine_NBC.ipynb
@@ -34,7 +34,7 @@
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
     "bias_voltage = 300 # detector bias voltage\n",
-    "cal_db_interface = \"tcp://max-exfl016:5005\"  # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:5005\"  # the database interface to use\n",
     "mem_cells = 64  # number of memory cells used\n",
     "instrument = \"SPB\"\n",
     "photon_energy = 9.2 # the photon energy in keV\n",
diff --git a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
index e38e3245e24c1af6486941f4c6120fcb6efa8dfd..c4431088e4a5e693f9e89cd3464c8183471240b5 100644
--- a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
+++ b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
@@ -21,6 +21,7 @@
     "out_folder = \"/gpfs/exfel/data/scratch/esobolev/pycal_litfrm/p002834/r0225\"  # the folder to output to, required\n",
     "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
     "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
+    "overwrite = False  # IGNORED, NEEDED FOR COMPATIBILITY.\n",
     "modules = [-1] # modules to correct, set to -1 for all, range allowed\n",
     "train_ids = [-1] # train IDs to correct, set to -1 for all, range allowed\n",
     "run = 225 # runs to process, required\n",
@@ -37,9 +38,10 @@
     "slopes_ff_from_files = \"\" # Path to locally stored SlopesFF and BadPixelsFF constants, loaded in precorrection notebook\n",
     "\n",
     "creation_time = \"\"  # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC e.g. \"2022-06-28 13:00:00\"\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8045\" # the database interface to use\n",
     "cal_db_timeout = 30000 # in milliseconds\n",
     "creation_date_offset = \"00:00:00\" # add an offset to creation date, e.g. to get different constants\n",
+    "cal_db_root = '/gpfs/exfel/d/cal/caldb_store'  # The calibration database root path to access constant files. For example accessing constants from the test database.\n",
     "\n",
     "mem_cells = -1  # Number of memory cells used, set to 0 to automatically infer\n",
     "bias_voltage = -1  # bias voltage, set to 0 to use stored value in slow data.\n",
@@ -60,6 +62,7 @@
     "noisy_adc_threshold = 0.25 # threshold to mask complete adc\n",
     "ff_gain = 7.2 # conversion gain for absolute FlatField constants, while applying xray_gain\n",
     "photon_energy = -1.0 # photon energy in keV, non-positive value for XGM autodetection\n",
+    "rounding_threshold = 0.5 # the fraction to round to down, 0.5 for standard rounding rule\n",
     "\n",
     "# Correction Booleans\n",
     "only_offset = False # Apply only Offset correction. if False, Offset is applied by Default. if True, Offset is only applied.\n",
@@ -86,6 +89,7 @@
     "# Optional auxiliary devices\n",
     "use_ppu_device = ''  # Device ID for a pulse picker device to only process picked trains, empty string to disable\n",
     "ppu_train_offset = 0  # When using the pulse picker, offset between the PPU's sequence start and actually picked train\n",
+    "require_ppu_trigger = False  # Optional protection against running without PPU or without triggering trains.\n",
     "\n",
     "use_litframe_finder = 'off' # Process only illuminated frames: 'off' - disable, 'device' - use online device data, 'offline' - use offline algorithm, 'auto' - choose online/offline source automatically (default)\n",
     "litframe_device_id = '' # Device ID for a lit frame finder device, empty string to auto detection\n",
@@ -122,25 +126,24 @@
    "outputs": [],
    "source": [
     "import itertools\n",
-    "import os\n",
     "import math\n",
     "import multiprocessing\n",
-    "import re\n",
+    "import os\n",
     "import warnings\n",
     "from datetime import timedelta\n",
     "from logging import warning\n",
     "from pathlib import Path\n",
-    "from time import perf_counter\n",
     "\n",
     "import tabulate\n",
     "from dateutil import parser\n",
     "from IPython.display import Latex, Markdown, display\n",
     "\n",
     "warnings.filterwarnings('ignore')\n",
+    "import h5py\n",
     "import matplotlib\n",
     "import matplotlib.pyplot as plt\n",
     "import yaml\n",
-    "from extra_data import RunDirectory, stack_detector_data\n",
+    "from extra_data import by_id, RunDirectory, stack_detector_data\n",
     "from extra_geom import AGIPD_1MGeometry, AGIPD_500K2GGeometry\n",
     "from matplotlib import cm as colormap\n",
     "from matplotlib.colors import LogNorm\n",
@@ -154,6 +157,7 @@
     "sns.set_context(\"paper\", font_scale=1.4)\n",
     "sns.set_style(\"ticks\")\n",
     "\n",
+    "import cal_tools.restful_config as rest_cfg\n",
     "from cal_tools import agipdalgs as calgs\n",
     "from cal_tools.agipdlib import (\n",
     "    AgipdCorrections,\n",
@@ -162,13 +166,17 @@
     "    LitFrameSelection,\n",
     ")\n",
     "from cal_tools.ana_tools import get_range\n",
+    "from cal_tools.calcat_interface import (\n",
+    "    AGIPD_CalibrationData,\n",
+    "    CalCatError,\n",
+    ")\n",
     "from cal_tools.enums import AgipdGainMode, BadPixels\n",
     "from cal_tools.step_timing import StepTimer\n",
     "from cal_tools.tools import (\n",
-    "    CalibrationMetadata,\n",
     "    calcat_creation_time,\n",
     "    map_modules_from_folder,\n",
     "    module_index_to_qm,\n",
+    "    write_constants_fragment,\n",
     ")"
    ]
   },
@@ -180,7 +188,9 @@
    "source": [
     "in_folder = Path(in_folder)\n",
     "out_folder = Path(out_folder)\n",
-    "run_folder = in_folder / f'r{run:04d}'"
+    "run_folder = in_folder / f'r{run:04d}'\n",
+    "\n",
+    "step_timer = StepTimer()"
    ]
   },
   {
@@ -295,28 +305,49 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "if use_ppu_device:\n",
-    "    # Obtain trains to process if using a pulse picker device.\n",
+    "if use_ppu_device and use_ppu_device in dc.control_sources:\n",
+    "    # Obtain trains to process if using a pulse picker device and it's present.\n",
     "\n",
-    "    # Will throw an uncaught exception if the device is wrong.\n",
     "    seq_start = dc[use_ppu_device, 'trainTrigger.sequenceStart.value'].ndarray()\n",
     "\n",
     "    # The trains picked are the unique values of trainTrigger.sequenceStart\n",
     "    # minus the first (previous trigger before this run).\n",
-    "    train_ids = np.unique(seq_start)[1:] + ppu_train_offset\n",
-    "\n",
-    "    print(f'PPU device {use_ppu_device} triggered for {len(train_ids)} train(s)')\n",
+    "    start_train_ids = np.unique(seq_start)[1:] + ppu_train_offset\n",
+    "\n",
+    "    train_ids = []\n",
+    "    for train_id in start_train_ids:\n",
+    "        n_trains = dc[\n",
+    "            use_ppu_device, 'trainTrigger.numberOfTrains'\n",
+    "        ].select_trains(by_id[[train_id]]).ndarray()[0]\n",
+    "        train_ids.extend(list(range(train_id, train_id + n_trains)))\n",
+    "\n",
+    "    if train_ids:\n",
+    "        print(f'PPU device {use_ppu_device} triggered for {len(train_ids)} train(s)')\n",
+    "    elif require_ppu_trigger:\n",
+    "        raise RuntimeError(f'PPU device {use_ppu_device} not triggered but required, aborting!')\n",
+    "    else:\n",
+    "        print(f'PPU device {use_ppu_device} not triggered, processing all valid trains')\n",
+    "        train_ids = None\n",
+    "        \n",
+    "elif use_ppu_device:\n",
+    "    # PPU configured but not present.\n",
+    "    \n",
+    "    if require_ppu_trigger:\n",
+    "        raise RuntimeError(f'PPU device {use_ppu_device} required but not found, aborting!')\n",
+    "    else:\n",
+    "        print(f'PPU device {use_ppu_device} configured but not found, processing all valid trains')\n",
+    "        train_ids = None\n",
     "\n",
     "elif train_ids != [-1]:\n",
     "    # Specific trains passed by parameter, convert to ndarray.\n",
     "    train_ids = np.array(train_ids)\n",
     "    \n",
     "    print(f'Processing up to {len(train_ids)} manually selected train(s)')\n",
+    "\n",
     "else:\n",
-    "    # Process all trains.\n",
-    "    train_ids = None\n",
-    "    \n",
-    "    print(f'Processing all valid trains')"
+    "    # No PPU configured.\n",
+    "    print(f'Processing all valid trains')\n",
+    "    train_ids = None"
    ]
   },
   {
@@ -360,7 +391,6 @@
     "\n",
     "instrument_src_mod = [\n",
     "    s for s in list(dc.all_sources) if f\"{first_mod_channel}CH\" in s][0]\n",
-    "mod_channel = int(re.findall(rf\".*{first_mod_channel}CH([0-9]+):.*\", instrument_src_mod)[0])\n",
     "\n",
     "agipd_cond = AgipdCtrl(\n",
     "    run_dc=dc,\n",
@@ -498,14 +528,11 @@
     "        warning('Neither explicit photon energy nor XGM device configured, photon rounding disabled!')\n",
     "        round_photons = False\n",
     "elif round_photons:\n",
-    "    print(f'Photon energy for rounding: {photon_energy:.3f} keV')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Data processing ##"
+    "    print(f'Photon energy for rounding: {photon_energy:.3f} keV')\n",
+    "\n",
+    "if round_photons and (rounding_threshold <= .0 or 1. <= rounding_threshold):\n",
+    "    warning('Round threshould is out of (0, 1) range. Use standard 0.5 value.')\n",
+    "    rounding_threshold = 0.5"
    ]
   },
   {
@@ -536,19 +563,33 @@
     "agipd_corr.noisy_adc_threshold = noisy_adc_threshold\n",
     "agipd_corr.ff_gain = ff_gain\n",
     "agipd_corr.photon_energy = photon_energy\n",
+    "agipd_corr.rounding_threshold = rounding_threshold\n",
     "\n",
     "agipd_corr.compress_fields = compress_fields\n",
     "if recast_image_data:\n",
     "    agipd_corr.recast_image_fields['data'] = np.dtype(recast_image_data)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Retrieving constants"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "module_index_to_karabo_da = {mod: da for (mod, da) in zip(modules, karabo_da)}"
+    "def get_constants_and_update_metadata(cal_data, main_metadata, constants):\n",
+    "    try:\n",
+    "        metadata = cal_data.metadata(constants)\n",
+    "        for key, value in metadata.items():\n",
+    "            main_metadata.setdefault(key, {}).update(value)\n",
+    "    except CalCatError as e:  # TODO: replace when API errors are improved.\n",
+    "        warning(f\"CalCatError: {e}\")"
    ]
   },
   {
@@ -557,54 +598,169 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Retrieve calibration constants to RAM\n",
-    "agipd_corr.allocate_constants(modules, (3, mem_cells_db, 512, 128))\n",
+    "step_timer.start()\n",
+    "# Instantiate agipd_cal with the read operating conditions.\n",
+    "agipd_cal = AGIPD_CalibrationData(\n",
+    "    detector_name=karabo_id,\n",
+    "    modules=karabo_da,\n",
+    "    sensor_bias_voltage=bias_voltage,\n",
+    "    memory_cells=mem_cells,\n",
+    "    acquisition_rate=acq_rate,\n",
+    "    integration_time=integration_time,\n",
+    "    source_energy=9.2,\n",
+    "    gain_mode=gain_mode,\n",
+    "    gain_setting=gain_setting,\n",
+    "    event_at=creation_time,\n",
+    "    client=rest_cfg.calibration_client(),\n",
+    "    caldb_root=Path(cal_db_root),\n",
+    ")\n",
     "\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# NOTE: this notebook will not overwrite calibration metadata file\n",
-    "const_yaml = metadata.get(\"retrieved-constants\", {})\n",
+    "# Prepare lists of expected calibrations\n",
+    "dark_constants = [\"Offset\", \"Noise\", \"BadPixelsDark\"]\n",
+    "if not gain_mode:  # Adaptive gain\n",
+    "    dark_constants.append(\"ThresholdsDark\")\n",
+    "\n",
+    "agipd_metadata = agipd_cal.metadata(dark_constants)\n",
+    "\n",
+    "agipd_cal.gain_mode = None  # gain_mode is not used for gain constants\n",
+    "pc_constants, ff_constants = [], []\n",
+    "if any(agipd_corr.pc_bools):\n",
+    "    pc_constants = [\"SlopesPC\", \"BadPixelsPC\"]\n",
+    "    get_constants_and_update_metadata(\n",
+    "        agipd_cal, agipd_metadata, pc_constants)\n",
+    "\n",
+    "if agipd_corr.corr_bools.get('xray_corr'):\n",
+    "    ff_constants = list(agipd_cal.illuminated_calibrations)\n",
+    "    get_constants_and_update_metadata(\n",
+    "        agipd_cal, agipd_metadata, ff_constants)\n",
+    "\n",
+    "step_timer.done_step(\"Constants were retrieved in\")\n",
+    "\n",
+    "print(\"Preparing constants (\"\n",
+    "      f\"FF: {agipd_corr.corr_bools.get('xray_corr', False)}, \"\n",
+    "      f\"PC: {any(agipd_corr.pc_bools)}, \"\n",
+    "      f\"BLC: {any(agipd_corr.blc_bools)})\")\n",
+    "# Display retrieved calibration constants timestamps\n",
+    "agipd_cal.display_markdown_retrieved_constants(metadata=agipd_metadata)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Validate constants availability and exclude modules with no offsets.\n",
+    "for da, calibrations in agipd_metadata.items():\n",
+    "    mod = modules[karabo_da.index(da)]\n",
+    "    # Constants to error out for when missing.\n",
+    "    error_missing_constants = {\"Offset\"}\n",
+    "    if not gain_mode:\n",
+    "        error_missing_constants |= {\"ThresholdsDark\"}\n",
+    "\n",
+    "    error_missing_constants -= set(calibrations)\n",
+    "    if error_missing_constants:\n",
+    "        warning(f\"Offset constant is not available to correct {da}.\")\n",
+    "        # Remove module from files to process.\n",
+    "        del mapped_files[module_index_to_qm(mod)]\n",
+    "        karabo_da.remove(da)\n",
+    "        modules.remove(mod)\n",
+    "\n",
+    "    warn_missing_constants = set(dark_constants + pc_constants + ff_constants)\n",
+    "    warn_missing_constants -= error_missing_constants\n",
+    "    warn_missing_constants -= set(calibrations)\n",
+    "    if warn_missing_constants:\n",
+    "        warning(f\"Constants {warn_missing_constants} were not retrieved for {da}.\")\n",
+    "\n",
+    "if not mapped_files:  # Offsets are missing for all modules.\n",
+    "    raise Exception(\"Could not find offset constants for any modules, will not correct data.\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Record constant details in YAML metadata\n",
+    "write_constants_fragment(\n",
+    "    out_folder=(metadata_folder or out_folder),\n",
+    "    det_metadata=agipd_metadata,\n",
+    "    caldb_root=agipd_cal.caldb_root)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load calibration constants to RAM\n",
+    "agipd_corr.allocate_constants(modules, (3, mem_cells_db, 512, 128))\n",
     "\n",
-    "def retrieve_constants(mod):\n",
+    "def load_constants(da, module):\n",
     "    \"\"\"\n",
-    "    Retrieve calibration constants and load them to shared memory\n",
+    "    Initialize constants data from previously retrieved metadata.\n",
+    "\n",
+    "    Args:\n",
+    "        da (str): Data Aggregator (Karabo DA)\n",
+    "        module (int): Module index\n",
     "\n",
-    "    Metadata for constants is taken from yml file or retrieved from the DB\n",
+    "    Returns:\n",
+    "        (int, dict, str): Module index, {constant name: creation time}, Karabo DA\n",
     "    \"\"\"\n",
-    "    k_da = module_index_to_karabo_da[mod]\n",
-    "    # check if there is a yaml file in out_folder that has the device constants.\n",
-    "    if k_da in const_yaml:\n",
-    "        when = agipd_corr.initialize_from_yaml(k_da, const_yaml, mod)\n",
-    "        print(f\"Found constants for {k_da} in calibration_metadata.yml\")\n",
-    "    else:\n",
-    "        try:\n",
-    "            # TODO: replace with proper retrieval (as done in pre-correction)\n",
-    "            when = agipd_corr.initialize_from_db(\n",
-    "                karabo_id=karabo_id,\n",
-    "                karabo_da=k_da,\n",
-    "                cal_db_interface=cal_db_interface,\n",
-    "                creation_time=creation_time,\n",
-    "                memory_cells=mem_cells_db,\n",
-    "                bias_voltage=bias_voltage,\n",
-    "                photon_energy=9.2,\n",
-    "                gain_setting=gain_setting,\n",
-    "                acquisition_rate=acq_rate,\n",
-    "                integration_time=integration_time,\n",
-    "                module_idx=mod,\n",
-    "                only_dark=False,\n",
-    "            )\n",
-    "            print(f\"Queried CalCat for {k_da}\")\n",
-    "        except Exception as e:\n",
-    "            warning(f\"Module: {k_da}, {e}\")\n",
-    "            when = None\n",
-    "    return mod, when, k_da\n",
+    "    const_data = dict()\n",
+    "    variant = dict()\n",
+    "    for cname, mdata in agipd_metadata[da].items():\n",
+    "        dataset = mdata[\"dataset\"]\n",
+    "        with h5py.File(agipd_cal.caldb_root / mdata[\"path\"], \"r\") as cf:  # noqa\n",
+    "            const_data[cname] = np.copy(cf[f\"{dataset}/data\"])\n",
+    "            variant[cname] = cf[dataset].attrs[\"variant\"] if cf[dataset].attrs.keys() else 0  # noqa\n",
+    "    agipd_corr.init_constants(const_data, module, variant)\n",
     "\n",
     "\n",
-    "print(f'Preparing constants (FF: {agipd_corr.corr_bools.get(\"xray_corr\", False)}, PC: {any(agipd_corr.pc_bools)}, '\n",
-    "      f'BLC: {any(agipd_corr.blc_bools)})')\n",
-    "ts = perf_counter()\n",
+    "step_timer.start()\n",
     "with multiprocessing.Pool(processes=len(modules)) as pool:\n",
-    "    const_out = pool.map(retrieve_constants, modules)\n",
-    "print(f\"Constants were loaded in {perf_counter()-ts:.01f}s\")"
+    "    pool.starmap(load_constants, zip(karabo_da, modules))\n",
+    "step_timer.done_step(f'Constants were loaded in ')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Store timestamps for Offset, SlopesPC, and SlopesFF\n",
+    "# in YAML file for time-summary table.\n",
+    "timestamps = {}\n",
+    "\n",
+    "for mod, mod_mdata in agipd_metadata.items():\n",
+    "    modno = int(mod[-2:])\n",
+    "\n",
+    "    module_timestamps = {}\n",
+    "\n",
+    "    # Store few time stamps if exists\n",
+    "    # Add NA to keep array structure\n",
+    "    for key in ['Offset', 'SlopesPC', 'SlopesFF']:\n",
+    "        if key in mod_mdata:\n",
+    "            module_timestamps[key] = mod_mdata[key][\"begin_validity_at\"]\n",
+    "        else:\n",
+    "            module_timestamps[key] = \"NA\"\n",
+    "\n",
+    "    timestamps[module_index_to_qm(modno)] = module_timestamps\n",
+    "\n",
+    "seq = sequences[0] if sequences else 0\n",
+    "\n",
+    "with open(f\"{out_folder}/retrieved_constants_s{seq}.yml\",\"w\") as fd:\n",
+    "    yaml.safe_dump({\"time-summary\": {f\"S{seq}\": timestamps}}, fd)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Data processing ##"
    ]
   },
   {
@@ -652,15 +808,6 @@
     "            yield i_proc, i * n_img // n_chunks, (i+1) * n_img // n_chunks"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "step_timer = StepTimer()"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -744,50 +891,6 @@
     "step_timer.print_summary()"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# if the yml file contains \"retrieved-constants\", that means a leading\n",
-    "# notebook got processed and the reporting would be generated from it.\n",
-    "fst_print = True\n",
-    "timestamps = {}\n",
-    "\n",
-    "for i, (modno, when, k_da) in enumerate(const_out):\n",
-    "    qm = module_index_to_qm(modno)\n",
-    "\n",
-    "    if k_da not in const_yaml:\n",
-    "        if fst_print:\n",
-    "            print(\"Constants are retrieved with creation time: \")\n",
-    "            fst_print = False\n",
-    "\n",
-    "        module_timestamps = {}\n",
-    "\n",
-    "        print(f\"{qm}:\")\n",
-    "        for key, item in when.items():\n",
-    "            if hasattr(item, 'strftime'):\n",
-    "                item = item.strftime('%y-%m-%d %H:%M')\n",
-    "            when[key] = item\n",
-    "            print('{:.<12s}'.format(key), item)\n",
-    "\n",
-    "        # Store few time stamps if exists\n",
-    "        # Add NA to keep array structure\n",
-    "        for key in ['Offset', 'SlopesPC', 'SlopesFF']:\n",
-    "            if when and key in when and when[key]:\n",
-    "                module_timestamps[key] = when[key]\n",
-    "            else:\n",
-    "                module_timestamps[key] = \"NA\"\n",
-    "        timestamps[qm] = module_timestamps\n",
-    "\n",
-    "seq = sequences[0] if sequences else 0\n",
-    "\n",
-    "if timestamps:\n",
-    "    with open(f\"{out_folder}/retrieved_constants_s{seq}.yml\",\"w\") as fd:\n",
-    "        yaml.safe_dump({\"time-summary\": {f\"S{seq}\": timestamps}}, fd)"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -852,7 +955,12 @@
     "    :param tid: Train Id to be loaded. First train is considered if None is given\n",
     "    :param path: Path to find image data inside h5 file\n",
     "    \"\"\"\n",
-    "    run_data = RunDirectory(data_folder, include)\n",
+    "    try:\n",
+    "        run_data = RunDirectory(data_folder, include)\n",
+    "    except FileNotFoundError:\n",
+    "        warning(f'No corrected files for {include}. Skipping plots.')\n",
+    "        import sys\n",
+    "        sys.exit(0)\n",
     "    if tid is not None:\n",
     "        tid, data = run_data.select(\n",
     "            f'{detector_id}/DET/*', source).train_from_id(tid, keep_dims=True)\n",
@@ -976,9 +1084,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "pulse_range = [np.min(pulseId[pulseId>=0]), np.max(pulseId[pulseId>=0])]\n",
diff --git a/notebooks/AGIPD/AGIPD_Correct_and_Verify_Summary_NBC.ipynb b/notebooks/AGIPD/AGIPD_Correct_and_Verify_Summary_NBC.ipynb
index 5de75ff6e72aa42ae279044455f624b17745d50e..36e21e9220cc0fcda5f1f4f4a235fcdef1208034 100644
--- a/notebooks/AGIPD/AGIPD_Correct_and_Verify_Summary_NBC.ipynb
+++ b/notebooks/AGIPD/AGIPD_Correct_and_Verify_Summary_NBC.ipynb
@@ -27,22 +27,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import re\n",
-    "import warnings\n",
     "from pathlib import Path\n",
     "\n",
-    "import dateutil.parser\n",
-    "import numpy as np\n",
     "import yaml\n",
     "\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "import matplotlib.pyplot as plt\n",
-    "\n",
-    "%matplotlib inline\n",
     "import tabulate\n",
     "from cal_tools.tools import CalibrationMetadata\n",
-    "from IPython.display import Latex, Markdown, display"
+    "from IPython.display import Latex, display"
    ]
   },
   {
diff --git a/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb b/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
deleted file mode 100644
index bbb32e928c2784a85ea2c1b28b58a1c39cb8388f..0000000000000000000000000000000000000000
--- a/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
+++ /dev/null
@@ -1,464 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# AGIPD Retrieving Constants Pre-correction #\n",
-    "\n",
-    "Author: European XFEL Detector Group, Version: 1.0\n",
-    "\n",
-    "Retrieving Required Constants for Offline Calibration of the AGIPD Detector"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "in_folder = \"/gpfs/exfel/exp/SPB/202030/p900119/raw\" # the folder to read data from, required\n",
-    "out_folder =  \"/gpfs/exfel/data/scratch/ahmedk/test/AGIPD_\"  # the folder to output to, required\n",
-    "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
-    "modules = [-1] # modules to correct, set to -1 for all, range allowed\n",
-    "run = 80 # runs to process, required\n",
-    "\n",
-    "karabo_id = \"SPB_DET_AGIPD1M-1\" # karabo karabo_id\n",
-    "karabo_da = ['-1']  # a list of data aggregators names, Default [-1] for selecting all data aggregators\n",
-    "ctrl_source_template = '{}/MDL/FPGA_COMP_TEST'  # path to control information\n",
-    "instrument_source_template = '{}/DET/{}:xtdf'  # path in the HDF5 file to images\n",
-    "receiver_template = \"{}CH0\" # inset for receiver devices\n",
-    "karabo_id_control = \"SPB_IRU_AGIPD1M1\" # karabo-id for control device\n",
-    "\n",
-    "# Parameters for calibration database.\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
-    "creation_date_offset = \"00:00:00\" # add an offset to creation date, e.g. to get different constants\n",
-    "creation_time = \"\"  # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC e.g. \"2022-06-28 13:00:00\"\n",
-    "\n",
-    "slopes_ff_from_files = \"\" # Path to locally stored SlopesFF and BadPixelsFF constants\n",
-    "mem_cells = -1  # number of memory cells used, set to 0 to automatically infer\n",
-    "bias_voltage = -1  # bias voltage, set to 0 to use stored value in slow data.\n",
-    "acq_rate = -1.  # the detector acquisition rate, use 0 to try to auto-determine\n",
-    "gain_setting = -1  # the gain setting, use -1 to use value stored in slow data.\n",
-    "gain_mode = -1  # gain mode (0: adaptive, 1-3 fixed high/med/low, -1: read from CONTROL data)\n",
-    "integration_time = -1 # integration time, negative values for auto-detection.\n",
-    "\n",
-    "# Correction Booleans\n",
-    "only_offset = False # Apply only Offset correction. if False, Offset is applied by Default. if True, Offset is only applied.\n",
-    "rel_gain = False # do relative gain correction based on PC data\n",
-    "xray_gain = True # do relative gain correction based on xray data\n",
-    "blc_noise = False # if set, baseline correction via noise peak location is attempted\n",
-    "blc_stripes = False # if set, baseline corrected via stripes\n",
-    "blc_hmatch = False # if set, base line correction via histogram matching is attempted\n",
-    "adjust_mg_baseline = False # adjust medium gain baseline to match highest high gain value"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Fill dictionaries comprising bools and arguments for correction and data analysis\n",
-    "# Here the hierarichy and dependencies for correction booleans are defined \n",
-    "corr_bools = {}\n",
-    "\n",
-    "# offset is at the bottom of AGIPD correction pyramid.\n",
-    "corr_bools[\"only_offset\"] = only_offset\n",
-    "\n",
-    "# Dont apply any corrections if only_offset is requested \n",
-    "if not only_offset:\n",
-    "    corr_bools[\"adjust_mg_baseline\"] = adjust_mg_baseline\n",
-    "    corr_bools[\"rel_gain\"] = rel_gain\n",
-    "    corr_bools[\"xray_corr\"] = xray_gain\n",
-    "    corr_bools[\"blc_noise\"] = blc_noise\n",
-    "    corr_bools[\"blc_hmatch\"] = blc_hmatch"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "from logging import warning\n",
-    "from pathlib import Path\n",
-    "from typing import Tuple\n",
-    "\n",
-    "import multiprocessing\n",
-    "from datetime import timedelta\n",
-    "from dateutil import parser\n",
-    "from extra_data import RunDirectory\n",
-    "\n",
-    "from cal_tools.agipdlib import (\n",
-    "    AgipdCtrl,\n",
-    "    SnowResolution,\n",
-    "    assemble_constant_dict,\n",
-    ")\n",
-    "from cal_tools.enums import AgipdGainMode\n",
-    "from cal_tools.tools import (\n",
-    "    calcat_creation_time,\n",
-    "    get_from_db,\n",
-    "    module_index_to_qm,\n",
-    "    CalibrationMetadata,\n",
-    ")\n",
-    "from iCalibrationDB import Conditions, Constants, Detectors"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# slopes_ff_from_files left as str for now\n",
-    "in_folder = Path(in_folder)\n",
-    "out_folder = Path(out_folder)\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# Constant paths & timestamps are saved under retrieved-constants in calibration_metadata.yml\n",
-    "retrieved_constants = metadata.setdefault(\"retrieved-constants\", {})"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Run's creation time:\n",
-    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
-    "offset = parser.parse(creation_date_offset)\n",
-    "delta = timedelta(hours=offset.hour, minutes=offset.minute, seconds=offset.second)\n",
-    "creation_time += delta\n",
-    "print(f\"Creation time: {creation_time}\")\n",
-    "    \n",
-    "print(f\"Outputting to {out_folder}\")\n",
-    "out_folder.mkdir(parents=True, exist_ok=True)\n",
-    "\n",
-    "melt_snow = False if corr_bools[\"only_offset\"] else SnowResolution.NONE"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ctrl_src = ctrl_source_template.format(karabo_id_control)\n",
-    "\n",
-    "print(f\"Detector in use is {karabo_id}\")\n",
-    "\n",
-    "# Extracting Instrument string\n",
-    "instrument = karabo_id.split(\"_\")[0]\n",
-    "# Evaluate detector instance for mapping\n",
-    "if instrument == \"SPB\":\n",
-    "    nmods = 16\n",
-    "elif instrument == \"MID\":\n",
-    "    nmods = 16\n",
-    "elif instrument == \"HED\":\n",
-    "    nmods = 8\n",
-    "\n",
-    "print(f\"Instrument {instrument}\")\n",
-    "\n",
-    "if karabo_da[0] == '-1':\n",
-    "    if modules[0] == -1:\n",
-    "        modules = list(range(nmods))\n",
-    "    karabo_da = [\"AGIPD{:02d}\".format(i) for i in modules]\n",
-    "else:\n",
-    "    modules = [int(x[-2:]) for x in karabo_da]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "run_dc = RunDirectory(in_folder / f\"r{run:04d}\")\n",
-    "\n",
-    "instrument_src = instrument_source_template.format(karabo_id, receiver_template)\n",
-    "\n",
-    "instr_dc = run_dc.select(instrument_src.format(\"*\"), require_all=True)\n",
-    "\n",
-    "if not instr_dc.train_ids:\n",
-    "    raise ValueError(f\"No images found for {in_folder / f'r{run:04d}'}\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "agipd_cond = AgipdCtrl(\n",
-    "    run_dc=run_dc,\n",
-    "    image_src=None,  # Not neededed, as we wont read mem_cells or acq_rate.\n",
-    "    ctrl_src=ctrl_src,\n",
-    ")\n",
-    "\n",
-    "if gain_setting == -1:\n",
-    "    gain_setting = agipd_cond.get_gain_setting(creation_time)\n",
-    "if bias_voltage == -1:\n",
-    "    bias_voltage = agipd_cond.get_bias_voltage(karabo_id_control)\n",
-    "if integration_time == -1:\n",
-    "    integration_time = agipd_cond.get_integration_time()\n",
-    "if gain_mode == -1:\n",
-    "    gain_mode = agipd_cond.get_gain_mode()\n",
-    "else:\n",
-    "    gain_mode = AgipdGainMode(gain_mode)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Retrieve Constants ##"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "pc_bools = [  # flags that points to the need for retrieving SlopesPC and BadPixelsPC constants.\n",
-    "    corr_bools.get(\"rel_gain\"),\n",
-    "    corr_bools.get(\"adjust_mg_baseline\"),\n",
-    "    corr_bools.get('blc_noise'),\n",
-    "    corr_bools.get('blc_hmatch'),\n",
-    "    corr_bools.get('blc_stripes'),\n",
-    "    melt_snow,\n",
-    "]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def retrieve_constants(\n",
-    "    k_da: str, idx: int\n",
-    ") -> Tuple[str, str, float, float, str, dict]:\n",
-    "    \"\"\"\n",
-    "    Retrieve constants for a module.\n",
-    "\n",
-    "    :return:\n",
-    "            k_da: karabo data aggregator.\n",
-    "            acq_rate: acquisition rate parameter.\n",
-    "            mem_cells: number of memory cells.\n",
-    "            mdata_dict: (DICT) dictionary with the metadata for the retrieved constants.\n",
-    "    \"\"\"\n",
-    "    # check if this module has images to process.\n",
-    "    if instrument_src.format(idx) not in instr_dc.all_sources:\n",
-    "        print(\"ERROR: No raw images found for \"\n",
-    "              f\"{module_index_to_qm(idx)}({k_da}).\")\n",
-    "\n",
-    "        return None, k_da, None, None\n",
-    "\n",
-    "    agipd_cond.image_src = instrument_src.format(idx)\n",
-    "\n",
-    "    if mem_cells == -1:\n",
-    "        # Read value from fast data.\n",
-    "        local_mem_cells = agipd_cond.get_num_cells()\n",
-    "    else:\n",
-    "        # or use overriding notebook parameter.\n",
-    "        local_mem_cells = mem_cells\n",
-    "\n",
-    "    if acq_rate == -1.:\n",
-    "        local_acq_rate = agipd_cond.get_acq_rate()\n",
-    "    else:\n",
-    "        local_acq_rate = acq_rate\n",
-    "\n",
-    "    const_dict = assemble_constant_dict(\n",
-    "        corr_bools,\n",
-    "        pc_bools,\n",
-    "        local_mem_cells,\n",
-    "        bias_voltage,\n",
-    "        gain_setting,\n",
-    "        local_acq_rate,\n",
-    "        photon_energy=9.2,\n",
-    "        gain_mode=gain_mode,\n",
-    "        beam_energy=None,\n",
-    "        only_dark=False,\n",
-    "        integration_time=integration_time\n",
-    "    )\n",
-    "\n",
-    "    # Retrieve multiple constants through an input dictionary\n",
-    "    # to return a dict of useful metadata.\n",
-    "    mdata_dict = dict()\n",
-    "    mdata_dict[\"constants\"] = dict()\n",
-    "    mdata_dict[\"physical-detector-unit\"] = None  # initialization\n",
-    "\n",
-    "    for const_name, (const_init_fun, const_shape, (cond_type, cond_param)) in const_dict.items():  # noqa\n",
-    "        if gain_mode and const_name in (\"ThresholdsDark\",):\n",
-    "            continue\n",
-    "        \n",
-    "        # saving metadata in a dict\n",
-    "        const_mdata = dict()\n",
-    "        mdata_dict[\"constants\"][const_name] = const_mdata\n",
-    "\n",
-    "        if slopes_ff_from_files and const_name in [\"SlopesFF\", \"BadPixelsFF\"]:\n",
-    "            const_mdata[\"file-path\"] = (\n",
-    "                f\"{slopes_ff_from_files}/slopesff_bpmask_module_{module_index_to_qm(idx)}.h5\")  # noqa\n",
-    "            const_mdata[\"creation-time\"] = \"00:00:00\"\n",
-    "            continue\n",
-    "        \n",
-    "        if gain_mode and const_name in (\n",
-    "            \"BadPixelsPC\", \"SlopesPC\", \"BadPixelsFF\", \"SlopesFF\"\n",
-    "        ):\n",
-    "            param_copy = cond_param.copy()\n",
-    "            del param_copy[\"gain_mode\"]\n",
-    "            condition = getattr(Conditions, cond_type).AGIPD(**param_copy)\n",
-    "        else:\n",
-    "            condition = getattr(Conditions, cond_type).AGIPD(**cond_param)\n",
-    "\n",
-    "        _, mdata = get_from_db(\n",
-    "            karabo_id,\n",
-    "            k_da,\n",
-    "            getattr(Constants.AGIPD, const_name)(),\n",
-    "            condition,\n",
-    "            getattr(np, const_init_fun)(const_shape),\n",
-    "            cal_db_interface,\n",
-    "            creation_time,\n",
-    "            meta_only=True,\n",
-    "            verbosity=0,\n",
-    "        )\n",
-    "        mdata_const = mdata.calibration_constant_version\n",
-    "        # check if constant was sucessfully retrieved.\n",
-    "        if mdata.comm_db_success:\n",
-    "            const_mdata[\"file-path\"] = (\n",
-    "                f\"{mdata_const.hdf5path}\" f\"{mdata_const.filename}\"\n",
-    "            )\n",
-    "            const_mdata[\"creation-time\"] = f\"{mdata_const.begin_at}\"\n",
-    "            mdata_dict[\"physical-detector-unit\"] = mdata_const.device_name\n",
-    "        else:\n",
-    "            const_mdata[\"file-path\"] = const_dict[const_name][:2]\n",
-    "            const_mdata[\"creation-time\"] = None\n",
-    "\n",
-    "    return mdata_dict, k_da, local_acq_rate, local_mem_cells"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inp = []\n",
-    "da_to_qm = dict()\n",
-    "for module_index, k_da in zip(modules, karabo_da):\n",
-    "    da_to_qm[k_da] = module_index_to_qm(module_index)\n",
-    "    if k_da in retrieved_constants:\n",
-    "        print(\n",
-    "            f\"Constant for {k_da} already in calibration_metadata.yml, won't query again.\")\n",
-    "        continue\n",
-    "\n",
-    "    inp.append((k_da, module_index))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "with multiprocessing.Pool(processes=nmods) as pool:\n",
-    "    results = pool.starmap(retrieve_constants, inp)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "acq_rate_mods = []\n",
-    "mem_cells_mods = []\n",
-    "for md_dict, k_da, acq_rate, mem_cells in results:\n",
-    "    if acq_rate is None and mem_cells is None:\n",
-    "        continue\n",
-    "    md_dict, k_da, acq_rate, mem_cells\n",
-    "    retrieved_constants[k_da] = md_dict\n",
-    "    mem_cells_mods.append(mem_cells)\n",
-    "    acq_rate_mods.append(acq_rate)\n",
-    "\n",
-    "# Validate that mem_cells and acq_rate are the same for all modules.\n",
-    "# TODO: Should a warning be enough?\n",
-    "if len(set(mem_cells_mods)) != 1 or len(set(acq_rate_mods)) != 1:\n",
-    "    print(\n",
-    "        \"WARNING: Number of memory cells or \"\n",
-    "        \"acquisition rate are not identical for all modules.\\n\"\n",
-    "        f\"mem_cells: {mem_cells_mods}.\\nacq_rate: {acq_rate_mods}.\")\n",
-    "\n",
-    "# check if it is requested not to retrieve any constants from the database\n",
-    "print(\"\\nRetrieved constants for modules:\",\n",
-    "        ', '.join([module_index_to_qm(x) for x in modules]))\n",
-    "print(f\"Operating conditions are:\")\n",
-    "print(f\"• Bias voltage: {bias_voltage}\")\n",
-    "print(f\"• Memory cells: {mem_cells}\")\n",
-    "print(f\"• Acquisition rate: {acq_rate}\")\n",
-    "print(f\"• Gain mode: {gain_mode.name}\")\n",
-    "print(f\"• Gain setting: {gain_setting}\")\n",
-    "print(f\"• Integration time: {integration_time}\")\n",
-    "print(f\"• Photon Energy: 9.2\")\n",
-    "print(\"Constant metadata is saved under \\\"retrieved-constants\\\" in calibration_metadata.yml\\n\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(\"Using constants with creation times:\")\n",
-    "timestamps = {}\n",
-    "\n",
-    "for k_da, module_name in da_to_qm.items():\n",
-    "    if k_da not in retrieved_constants.keys():\n",
-    "        continue\n",
-    "    module_timestamps = timestamps[module_name] = {}\n",
-    "    module_constants = retrieved_constants[k_da]\n",
-    "\n",
-    "    print(f\"{module_name}:\")\n",
-    "    for cname, mdata in module_constants[\"constants\"].items():\n",
-    "        if hasattr(mdata[\"creation-time\"], 'strftime'):\n",
-    "            mdata[\"creation-time\"] = mdata[\"creation-time\"].strftime('%y-%m-%d %H:%M')\n",
-    "        print(f'{cname:.<12s}', mdata[\"creation-time\"])\n",
-    "\n",
-    "    for cname in ['Offset', 'SlopesPC', 'SlopesFF']:\n",
-    "        if cname in module_constants[\"constants\"]:\n",
-    "            module_timestamps[cname] = module_constants[\"constants\"][cname][\"creation-time\"]\n",
-    "        else:\n",
-    "            module_timestamps[cname] = \"NA\"\n",
-    "\n",
-    "time_summary = retrieved_constants.setdefault(\"time-summary\", {})\n",
-    "time_summary[\"SAll\"] = timestamps\n",
-    "\n",
-    "metadata.save()"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.12"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/notebooks/AGIPD/Characterize_AGIPD_Gain_Darks_NBC.ipynb b/notebooks/AGIPD/Characterize_AGIPD_Gain_Darks_NBC.ipynb
index 52b639be6ccbc481a69df0242d96ca1441b09306..76987d79519ae0f2642d001ec22fd94b05d5c8d3 100644
--- a/notebooks/AGIPD/Characterize_AGIPD_Gain_Darks_NBC.ipynb
+++ b/notebooks/AGIPD/Characterize_AGIPD_Gain_Darks_NBC.ipynb
@@ -36,10 +36,11 @@
     "karabo_id_control = \"HED_EXP_AGIPD500K2G\" # karabo-id for control device '\n",
     "\n",
     "use_dir_creation_date = True  # use dir creation date as data production reference date\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # the database interface to use\n",
     "cal_db_timeout = 3000000 # timeout on caldb requests\"\n",
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
+    "sort_runs = True  # Sort the selected dark runs. This flag is added for old data (e.g. 900174 r0011).\n",
     "\n",
     "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
     "bias_voltage = 0 # bias voltage, set to 0 to use stored value in slow data.\n",
@@ -50,7 +51,7 @@
     "interlaced = False # assume interlaced data format, for data prior to Dec. 2017\n",
     "\n",
     "thresholds_offset_sigma = 3. # offset sigma thresholds for offset deduced bad pixels\n",
-    "thresholds_offset_hard = [0, 0]  # For setting the same threshold offset for the 3 gains. Left for backcompatability. Default [0, 0] to take the following parameters.\n",
+    "thresholds_offset_hard = [0, 0]  # For setting the same threshold offset for the 3 gains. Left for backward compatibility. Default [0, 0] to take the following parameters.\n",
     "thresholds_offset_hard_hg = [3000, 7000]  # High-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
     "thresholds_offset_hard_mg = [6000, 10000]  # Medium-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
     "thresholds_offset_hard_lg = [6000, 10000]  # Low-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
@@ -59,14 +60,14 @@
     "thresholds_offset_hard_lg_fixed = [3500, 6500]  # Same as thresholds_offset_hard_lg, but for fixed gain operation\n",
     "\n",
     "thresholds_noise_sigma = 5. # noise sigma thresholds for offset deduced bad pixels\n",
-    "thresholds_noise_hard = [0, 0] # For setting the same threshold noise for the 3 gains. Left for backcompatability. Default [0, 0] to take the following parameters.\n",
+    "thresholds_noise_hard = [0, 0] # For setting the same threshold noise for the 3 gains. Left for backward compatibility. Default [0, 0] to take the following parameters.\n",
     "thresholds_noise_hard_hg = [4, 20] # High-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
     "thresholds_noise_hard_mg = [4, 20] # Medium-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
     "thresholds_noise_hard_lg = [4, 20] # Low-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
     "\n",
     "thresholds_gain_sigma = 5.  # Gain separation sigma threshold\n",
-    "max_trains = 550  # Maximum number of trains to use for processing dark. Set to 0 to process all available trains. 550 added for ~500GB nodes to temporarely avoid memory issues.\n",
-    "min_trains = 1  # Miniumum number of trains for processing dark. If run folder has less than minimum trains, processing is stopped.\n",
+    "max_trains = 550  # Maximum number of trains to use for processing dark. Set to 0 to process all available trains. 550 added for ~500GB nodes to temporarily avoid memory issues.\n",
+    "min_trains = 1  # Minimum number of trains for processing dark. If run folder has less than minimum trains, processing is stopped.\n",
     "high_res_badpix_3d = False # set this to True if you need high-resolution 3d bad pixel plots. ~7mins extra time for 64 memory cells\n",
     "\n",
     "# This is used if modules is not specified:\n",
@@ -95,9 +96,8 @@
     "from collections import OrderedDict\n",
     "from datetime import timedelta\n",
     "from pathlib import Path\n",
-    "from typing import List, Tuple\n",
+    "from typing import Tuple\n",
     "\n",
-    "import dateutil.parser\n",
     "import matplotlib\n",
     "import numpy as np\n",
     "import pasha as psh\n",
@@ -110,8 +110,9 @@
     "\n",
     "import iCalibrationDB\n",
     "import matplotlib.pyplot as plt\n",
-    "from cal_tools.agipdlib import AgipdCtrl\n",
-    "from cal_tools.enums import AgipdGainMode, BadPixels\n",
+    "from cal_tools import step_timing\n",
+    "from cal_tools.agipdlib import AgipdCtrlRuns\n",
+    "from cal_tools.enums import BadPixels\n",
     "from cal_tools.plotting import (\n",
     "    create_constant_overview,\n",
     "    plot_badpix_3d,\n",
@@ -124,7 +125,6 @@
     "    get_pdu_from_db,\n",
     "    get_random_db_interface,\n",
     "    get_report,\n",
-    "    map_gain_stages,\n",
     "    module_index_to_qm,\n",
     "    run_prop_seq_from_path,\n",
     "    save_const_to_h5,\n",
@@ -143,27 +143,10 @@
     "# insert control device if format string (does nothing otherwise)\n",
     "ctrl_src = ctrl_source_template.format(karabo_id_control)\n",
     "\n",
-    "runs_dict = OrderedDict()\n",
     "run_numbers = [run_high, run_med, run_low]\n",
     "\n",
-    "for gain_idx, (run_name, run_number) in enumerate(zip([\"high\", \"med\", \"low\"], run_numbers)):\n",
-    "    runs_dict[run_name] = {\n",
-    "        \"number\": run_number,\n",
-    "        \"gain\": gain_idx,\n",
-    "        \"dc\": RunDirectory(f'{in_folder}/r{run_number:04d}/')\n",
-    "    }\n",
-    "\n",
-    "creation_time=None\n",
-    "if use_dir_creation_date:\n",
-    "    creation_time = get_dir_creation_date(in_folder, run_high)\n",
-    "\n",
-    "print(f\"Using {creation_time} as creation time of constant.\")\n",
-    "\n",
     "run, prop, seq = run_prop_seq_from_path(in_folder)\n",
     "\n",
-    "# Read report path and create file location tuple to add with the injection\n",
-    "file_loc = f\"proposal:{prop} runs:{run_low} {run_med} {run_high}\"\n",
-    "\n",
     "report = get_report(metadata_folder)\n",
     "cal_db_interface = get_random_db_interface(cal_db_interface)\n",
     "print(f'Calibration database interface: {cal_db_interface}')\n",
@@ -194,7 +177,9 @@
     "\n",
     "print(f\"Detector in use is {karabo_id}\")\n",
     "print(f\"Instrument {instrument}\")\n",
-    "print(f\"Detector instance {dinstance}\")"
+    "print(f\"Detector instance {dinstance}\")\n",
+    "\n",
+    "step_timer = step_timing.StepTimer()"
    ]
   },
   {
@@ -203,45 +188,47 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
     "# Create out_folder if it doesn't exist.\n",
     "Path(out_folder).mkdir(parents=True, exist_ok=True)\n",
     "\n",
     "mod_image_size = []\n",
-    "for run_dict in runs_dict.values():\n",
+    "for run in run_numbers:\n",
     "    missing_modules = []  # modules with no images within a run.\n",
     "    n_trains_list = []   # list of the number of trains for each module within a run.\n",
     "    # This is important in case of no slurm parallelization over modules is done.\n",
     "    # (e.g. running notebook interactively)\n",
     "    for m in modules:\n",
     "        # validate that there are trains for the selected modules and run.\n",
-    "        dc = run_dict[\"dc\"].select(\n",
+    "        dc = RunDirectory(f'{in_folder}/r{run:04d}/').select(\n",
     "            instrument_src.format(m), \"*\", require_all=True)\n",
     "        n_trains = len(dc.train_ids)\n",
     "\n",
     "        if n_trains == 0:\n",
-    "            print(f\"WARNING: No images for module AGIPD{m:02d}, run {run_dict['number']}.\")\n",
+    "            print(f\"WARNING: No images for module AGIPD{m:02d}, run {run}.\")\n",
     "            missing_modules.append(m)\n",
     "        # Raise a warning if the module has less trains than expected.\n",
     "        elif n_trains < min_trains:\n",
-    "            print(f\"WARNING: AGIPD{m:02d}, run {run_dict['number']} \"\n",
+    "            print(f\"WARNING: AGIPD{m:02d}, run {run} \"\n",
     "                  f\"has trains less than minimum trains: {min_trains}.\")\n",
     "        else:\n",
     "            print(f\"Processing {max_trains if max_trains < n_trains else n_trains} \"\n",
-    "                  f\"for AGIPD{m:02d}, run {run_dict['number']} \")\n",
+    "                  f\"for AGIPD{m:02d}, run {run} \")\n",
     "\n",
     "        n_trains_list.append(n_trains)\n",
     "        mod_image_size.append(np.product(dc[instrument_src.format(m), \"image.data\"].shape) * 2  / 1e9)\n",
     "\n",
     "    if max(n_trains_list) == 0:\n",
-    "        raise ValueError(f\"No images to process for run: {run_dict['number']}\")\n",
+    "        raise ValueError(f\"No images to process for run: {run}\")\n",
     "    elif max(n_trains_list) < min_trains:\n",
-    "        raise ValueError(f\"{run_dict['number']} has less than minimum trains: {min_trains}\")\n",
+    "        raise ValueError(f\"{run} has less than minimum trains: {min_trains}\")\n",
     "\n",
     "# Update modules and karabo_da lists based on available modules to processes.\n",
     "modules = [m for m in modules if m not in missing_modules]\n",
     "karabo_da = create_karabo_da_list(modules)\n",
-    "\n",
-    "print(f\"Will process data of ({sum(mod_image_size):.02f} GB).\")"
+    "print(f\"Will process data of ({sum(mod_image_size):.02f} GB).\")\n",
+    "step_timer.done_step(\"Checking the data size and availability.\")"
    ]
   },
   {
@@ -257,73 +244,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def read_run_conditions(runs_dict: dict):\n",
-    "    agipd_cond = AgipdCtrl(\n",
-    "        run_dc=runs_dict[\"dc\"],\n",
-    "        image_src=instrument_src_mod,\n",
-    "        ctrl_src=ctrl_src,\n",
-    "    )\n",
-    "    cond_dict[\"runs\"].append(runs_dict[\"number\"])\n",
-    "    if acq_rate == 0:\n",
-    "        cond_dict[\"acq_rate\"].append(agipd_cond.get_acq_rate())\n",
-    "    if mem_cells == 0:\n",
-    "        cond_dict[\"mem_cells\"].append(agipd_cond.get_num_cells())\n",
-    "    if gain_setting == -1:    \n",
-    "        cond_dict[\"gain_setting\"].append(\n",
-    "            agipd_cond.get_gain_setting(creation_time))\n",
-    "    if bias_voltage == 0.:\n",
-    "        cond_dict[\"bias_voltage\"].append(\n",
-    "            agipd_cond.get_bias_voltage(karabo_id_control))\n",
-    "    if integration_time == -1:\n",
-    "        cond_dict[\"integration_time\"].append(\n",
-    "            agipd_cond.get_integration_time())\n",
-    "    if gain_mode == -1:\n",
-    "        cond_dict[\"gain_mode\"].append(agipd_cond.get_gain_mode())\n",
-    "    else:\n",
-    "        cond_dict[\"gain_mode\"].append(AgipdGainMode(gain_mode))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def validate_gain_modes(gain_modes: List[AgipdGainMode]):\n",
-    "    # Validate that gain modes are not a mix of adaptive and fixed gain.\n",
-    "    if all(\n",
-    "        gm == AgipdGainMode.ADAPTIVE_GAIN for gm in gain_modes\n",
-    "    ):\n",
-    "        fixed_gain_mode = False\n",
-    "    # Some runs are adaptive by mistake.\n",
-    "    elif any(\n",
-    "        gm == AgipdGainMode.ADAPTIVE_GAIN for gm in gain_modes\n",
-    "    ):\n",
-    "        raise ValueError(\n",
-    "            f\"ERROR: Given runs {run_numbers}\"\n",
-    "            \" have a mix of ADAPTIVE and FIXED gain modes: \"\n",
-    "            f\"{gain_modes}.\"\n",
-    "    )\n",
-    "    elif list(gain_modes) == [\n",
-    "        AgipdGainMode.FIXED_HIGH_GAIN,\n",
-    "        AgipdGainMode.FIXED_MEDIUM_GAIN,\n",
-    "        AgipdGainMode.FIXED_LOW_GAIN\n",
-    "    ]:\n",
-    "        fixed_gain_mode = True\n",
-    "    else:\n",
-    "        raise ValueError(\n",
-    "        \"ERROR: Wrong arrangment of given dark runs. \"\n",
-    "        f\"Given runs' gain_modes are {gain_modes} for runs: {run_numbers}.\"\n",
-    "    )\n",
-    "    return fixed_gain_mode"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
+    "step_timer.start()\n",
+    "\n",
     "# Read slow data from 1st channel only.\n",
     "# Read all modules in one notebook and validate the conditions across detectors?\n",
     "# Currently slurm jobs run per one module.\n",
@@ -331,39 +253,42 @@
     "# TODO: what if first module is not available. Maybe only channel 2 available\n",
     "instrument_src_mod = instrument_src.format(modules[0])\n",
     "\n",
-    "cond_dict = dict()\n",
-    "fixed_gain_mode = None\n",
-    "\n",
-    "with multiprocessing.Manager() as manager:\n",
-    "    cond_dict[\"runs\"] = manager.list()\n",
-    "    cond_dict[\"acq_rate\"] = manager.list()\n",
-    "    cond_dict[\"mem_cells\"] = manager.list()\n",
-    "    cond_dict[\"gain_setting\"] = manager.list()\n",
-    "    cond_dict[\"gain_mode\"] = manager.list()\n",
-    "    cond_dict[\"bias_voltage\"] = manager.list()\n",
-    "    cond_dict[\"integration_time\"] = manager.list()\n",
-    "\n",
-    "    with multiprocessing.Pool(processes=len(modules)) as pool:\n",
-    "        pool.starmap(read_run_conditions, zip(runs_dict.values()))\n",
-    "\n",
-    "    for cond, vlist in cond_dict.items():\n",
-    "        if cond == \"runs\":\n",
-    "            continue\n",
-    "        elif cond == \"gain_mode\":\n",
-    "            fixed_gain_mode = validate_gain_modes(cond_dict[\"gain_mode\"])\n",
-    "        elif not all(x == vlist[0] for x in vlist):\n",
-    "            # TODO: raise ERROR??\n",
-    "            print(\n",
-    "                f\"WARNING: {cond} is not the same for the runs \"\n",
-    "                f\"{cond_dict['runs']} with values\"\n",
-    "                f\" of {cond_dict[cond]}, respectively.\"\n",
-    "            )\n",
-    "    if cond_dict[\"acq_rate\"]: acq_rate = cond_dict[\"acq_rate\"][0]\n",
-    "    if cond_dict[\"mem_cells\"]: mem_cells = cond_dict[\"mem_cells\"][0]\n",
-    "    if cond_dict[\"gain_setting\"]: gain_setting = cond_dict[\"gain_setting\"][0]\n",
-    "    if cond_dict[\"gain_mode\"]: gain_mode = list(cond_dict[\"gain_mode\"])\n",
-    "    if cond_dict[\"bias_voltage\"]: bias_voltage = cond_dict[\"bias_voltage\"][0]\n",
-    "    if cond_dict[\"integration_time\"]: integration_time = cond_dict[\"integration_time\"][0]"
+    "agipd_ctrl_dark = AgipdCtrlRuns(\n",
+    "    raw_folder=in_folder,\n",
+    "    runs=run_numbers,\n",
+    "    image_src=instrument_src_mod,\n",
+    "    ctrl_src=ctrl_src,\n",
+    "    sort_dark_runs_enabled=sort_runs\n",
+    ")\n",
+    "# Update run_numbers list in case it was sorted.\n",
+    "run_numbers = agipd_ctrl_dark.runs\n",
+    "\n",
+    "creation_time = None\n",
+    "if use_dir_creation_date:\n",
+    "    creation_time = get_dir_creation_date(in_folder, run_numbers[0])\n",
+    "\n",
+    "print(f\"Using {creation_time} as creation time of constant.\")\n",
+    "\n",
+    "if mem_cells == 0:\n",
+    "    mem_cells = agipd_ctrl_dark.get_memory_cells()\n",
+    "\n",
+    "if acq_rate == 0:\n",
+    "    acq_rate = agipd_ctrl_dark.get_acq_rate()\n",
+    "\n",
+    "if bias_voltage == 0:\n",
+    "    bias_voltage = agipd_ctrl_dark.get_bias_voltage(karabo_id_control)\n",
+    "\n",
+    "fixed_gain_mode = False\n",
+    "if gain_mode == -1:\n",
+    "    gain_mode = agipd_ctrl_dark.gain_modes\n",
+    "    fixed_gain_mode = agipd_ctrl_dark.fixed_gain_mode()\n",
+    "\n",
+    "if gain_setting == -1:\n",
+    "    gain_setting = agipd_ctrl_dark.get_gain_setting()\n",
+    "\n",
+    "if integration_time == -1:\n",
+    "    integration_time = agipd_ctrl_dark.get_integration_time()\n",
+    "step_timer.done_step(f\"Read operating conditions.\")"
    ]
   },
   {
@@ -462,16 +387,16 @@
     "print(f\"Will use {parallel_num_procs} processes with {parallel_num_threads} threads each\")\n",
     "\n",
     "def characterize_module(\n",
-    "    channel: int, runs_dict: dict,\n",
+    "    channel: int, gain_run: Tuple[int, int],\n",
     ") -> Tuple[int, int, np.array, np.array, np.array, np.array, np.array]:\n",
     "\n",
+    "    gain_index, run = gain_run\n",
     "    # Select the corresponding module channel.\n",
     "    instrument_src_mod = instrument_src.format(channel)\n",
     "\n",
-    "    run_dc = runs_dict[\"dc\"].select(instrument_src_mod, require_all=True)\n",
+    "    run_dc = RunDirectory(f'{in_folder}/r{run:04d}/').select(instrument_src_mod, require_all=True)\n",
     "    if max_trains != 0:\n",
     "        run_dc = run_dc.select_trains(np.s_[:max_trains])\n",
-    "    gain_index = runs_dict[\"gain\"]\n",
     "\n",
     "    # Read module's image and cellId data.\n",
     "    im = run_dc[instrument_src_mod, \"image.data\"].ndarray()\n",
@@ -513,7 +438,15 @@
     "            ga_slice = ga[..., cell_slice_index]\n",
     "            gains[..., cell_number] = np.median(ga_slice, axis=2)\n",
     "            gains_std[..., cell_number] = np.std(ga_slice, axis=2)\n",
-    "    context.map(process_cell, np.unique(cell_ids))\n",
+    "    unique_cell_ids = np.unique(cell_ids)\n",
+    "\n",
+    "    # We assume cells are accepted starting 0.\n",
+    "    if np.any(unique_cell_ids > mem_cells):\n",
+    "        raise ValueError(\n",
+    "            f\"Invalid cells found {unique_cell_ids} \"\n",
+    "            f\"for run: {run_dc.run_metadata()['runNumber']}.\")\n",
+    "\n",
+    "    context.map(process_cell, unique_cell_ids)\n",
     "\n",
     "    # bad pixels\n",
     "    bp = np.zeros_like(offset, dtype=np.uint32)\n",
@@ -544,18 +477,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
     "with multiprocessing.Pool(processes=parallel_num_procs) as pool:\n",
     "    results = pool.starmap(\n",
-    "        characterize_module, itertools.product(modules, list(runs_dict.values())))\n",
+    "        characterize_module, itertools.product(modules, list(enumerate(run_numbers))))\n",
+    "\n",
+    "step_timer.done_step(\"Processing dark from the 3 runs.\")\n",
     "\n",
     "# mapped values for processing 2 modules example:\n",
-    "# [\n",
-    "#     0, {\"gain\": 0, \"run_number\": <run-high>, \"dc\": <high-dc>},\n",
-    "#     0, {\"gain\": 1, \"run_number\": <run-med>, \"dc\": <med-dc>},\n",
-    "#     0, {\"gain\": 2, \"run_number\": <run-low>, \"dc\": <low-dc>},\n",
-    "#     1, {\"gain\": 0, \"run_number\": <run-high>, \"dc\": <high-dc>},\n",
-    "#     1, {\"gain\": 1, \"run_number\": <run-med>, \"dc\": <med-dc>},\n",
-    "#     1, {\"gain\": 2, \"run_number\": <run-low>, \"dc\": <low-dc>},\n",
+    "# [(0, (0, 9013))\n",
+    "#     0, (0, run-high),\n",
+    "#     0, (1, run-med),\n",
+    "#     0, (2, run-low),\n",
+    "#     1, (0, run-high),\n",
+    "#     1, (1, run-med),\n",
+    "#     1, (2, run-low),,\n",
     "# ]"
    ]
   },
@@ -706,7 +643,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
     "md = None\n",
+    "# Location of source data, injected with the constants\n",
+    "file_loc = f\"proposal:{prop} runs:{' '.join([str(r) for r in reversed(run_numbers)])}\"\n",
     "\n",
     "for qm in res:\n",
     "    db_module = qm_dict[qm][\"db_module\"]\n",
@@ -728,7 +669,9 @@
     "print(f\"• memory_cells: {mem_cells}\\n• bias_voltage: {bias_voltage}\\n\"\n",
     "      f\"• acquisition_rate: {acq_rate}\\n• gain_setting: {gain_setting}\\n\"\n",
     "      f\"• gain_mode: {fixed_gain_mode}\\n• integration_time: {integration_time}\\n\"\n",
-    "      f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")"
+    "      f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")\\\n",
+    "\n",
+    "step_timer.done_step(\"Inject calibration constants to the database.\")"
    ]
   },
   {
@@ -749,6 +692,11 @@
     "\n",
     "\n",
     "def retrieve_old_constant(qm, const):\n",
+    "\n",
+    "    import time\n",
+    "\n",
+    "    st = time.time()\n",
+    "\n",
     "    dconst = getattr(iCalibrationDB.Constants.AGIPD, const)()\n",
     "\n",
     "    data, mdata = get_from_db(\n",
@@ -775,15 +723,15 @@
     "            mdata.calibration_constant_version.filename\n",
     "        )\n",
     "        h5path = mdata.calibration_constant_version.h5path\n",
-    "\n",
-    "    return data, timestamp, filepath, h5path\n",
+    "    \n",
+    "    return data, timestamp, filepath, h5path, time.time() - st\n",
     "\n",
     "\n",
     "old_retrieval_pool = multiprocessing.Pool()\n",
     "old_retrieval_res = old_retrieval_pool.starmap_async(\n",
     "    retrieve_old_constant, qm_x_const\n",
     ")\n",
-    "old_retrieval_pool.close()"
+    "old_retrieval_pool.close()\n"
    ]
   },
   {
@@ -822,6 +770,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
     "cell = 3\n",
     "gain = 0\n",
     "show_overview(res, cell, gain, infix=\"{}-{}-{}\".format(*run_numbers))"
@@ -860,7 +810,9 @@
    "source": [
     "cell = 3\n",
     "gain = 2\n",
-    "show_overview(res, cell, gain, infix=\"{}-{}-{}\".format(*run_numbers))"
+    "show_overview(res, cell, gain, infix=\"{}-{}-{}\".format(*run_numbers))\n",
+    "\n",
+    "step_timer.done_step(\"Single-Cell Overviews.\")"
    ]
   },
   {
@@ -916,6 +868,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
     "create_constant_overview(offset_g, \"Offset (ADU)\", mem_cells, 4000, 8000,\n",
     "                         badpixels=[badpix_g, np.nan])"
    ]
@@ -948,7 +902,9 @@
     "                             badpixels=[bp_thresh, np.nan],\n",
     "                             gmap=['HG-MG Threshold', 'MG-LG Threshold', 'High gain', 'Medium gain', 'low gain'],\n",
     "                             marker=['d','d','','','']\n",
-    "                             )"
+    "                             )\n",
+    "\n",
+    "step_timer.done_step(\"Aggregate values, and per Cell behaviour.\")"
    ]
   },
   {
@@ -983,13 +939,16 @@
     "old_mdata = {}\n",
     "old_retrieval_res.wait()\n",
     "\n",
-    "for (qm, const), (data, timestamp, filepath, h5path) in zip(qm_x_const, old_retrieval_res.get()):\n",
+    "timings = []\n",
+    "for (qm, const), (data, timestamp, filepath, h5path, timing) in zip(qm_x_const, old_retrieval_res.get()):\n",
     "    old_const.setdefault(qm, {})[const] = data\n",
     "    old_mdata.setdefault(qm, {})[const] = {\n",
     "        \"timestamp\": timestamp,\n",
     "        \"filepath\": filepath,\n",
     "        \"h5path\": h5path\n",
-    "    }"
+    "    }\n",
+    "    timings.append(timing)\n",
+    "print(f\"Retrieving old constant took around {np.asarray(timings).mean():.01f} s\")"
    ]
   },
   {
@@ -1021,9 +980,17 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
     "table = []\n",
     "gain_names = ['High', 'Medium', 'Low']\n",
-    "bits = [BadPixels.NOISE_OUT_OF_THRESHOLD, BadPixels.OFFSET_OUT_OF_THRESHOLD, BadPixels.OFFSET_NOISE_EVAL_ERROR, BadPixels.GAIN_THRESHOLDING_ERROR]\n",
+    "bits = [\n",
+    "    BadPixels.NOISE_OUT_OF_THRESHOLD,\n",
+    "    BadPixels.OFFSET_OUT_OF_THRESHOLD,\n",
+    "    BadPixels.OFFSET_NOISE_EVAL_ERROR,\n",
+    "    BadPixels.GAIN_THRESHOLDING_ERROR,\n",
+    "]\n",
+    "\n",
     "for qm in badpix_g.keys():\n",
     "    for gain in range(3):\n",
     "        l_data = []\n",
@@ -1069,7 +1036,9 @@
     "if len(table)>0:\n",
     "    md = display(Latex(tabulate.tabulate(table, tablefmt='latex',\n",
     "                                         headers=[\"Pixel type\", \"Threshold\",\n",
-    "                                                  \"New constant\", \"Old constant\"])))"
+    "                                                  \"New constant\", \"Old constant\"])))\n",
+    "\n",
+    "step_timer.done_step(\"Create badpixels table.\")"
    ]
   },
   {
@@ -1078,6 +1047,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
     "header = ['Parameter',\n",
     "          \"New constant\", \"Old constant \",\n",
     "          \"New constant\", \"Old constant \",\n",
@@ -1149,7 +1120,9 @@
     "\n",
     "for (const, qm), table in zip(constants_x_qms, tables):\n",
     "    display(Markdown(f\"### {qm}: {const} [ADU], good pixels only\"))\n",
-    "    display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))"
+    "    display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))\n",
+    "\n",
+    "step_timer.done_step(\"Computing comparison tables.\")"
    ]
   }
  ],
diff --git a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
index 99a185e0f2b5e49571bed914178a0a6febfb815a..fa1d218a0f5ee268f2d7ff139d6ad19e012b5ebf 100644
--- a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
+++ b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
@@ -31,7 +31,7 @@
     "karabo_da_control = 'AGIPD1MCTRL00' # karabo DA for control infromation\n",
     "\n",
     "use_dir_creation_date = True # use the creation data of the input dir for database queries\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8045\" # the database interface to use\n",
     "cal_db_timeout = 30000 # in milli seconds\n",
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
diff --git a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb
index 1694e505ec2a5632f8d0e4f644b33cd26aaf4945..fca571903e01ac972a3a4a4c1f03c44ead950819 100644
--- a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb
+++ b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb
@@ -19,15 +19,15 @@
     "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
     "hist_file_template = \"hists_m{:02d}_sum.h5\"\n",
     "proc_folder = \"\" # Path to corrected image data used to create histograms and validation plots\n",
-    "raw_folder = \"/gpfs/exfel/exp/MID/202030/p900137/raw\"  # folder of raw data. This is used to save information of source data of generated constants, required\n",
-    "run = 449 # runs of image data used to create histograms\n",
+    "raw_folder = \"\"  # folder of raw data. This is used to save information of source data of generated constants, required\n",
+    "run = 38 # runs of image data used to create histograms\n",
     "\n",
-    "karabo_id = \"MID_DET_AGIPD1M-1\" # karabo karabo_id\n",
+    "karabo_id = \"SPB_DET_AGIPD1M-1\" # karabo karabo_id\n",
     "ctrl_source_template = '{}/MDL/FPGA_COMP' # path to control information\n",
-    "karabo_id_control = \"MID_EXP_AGIPD1M1\" # karabo-id for control device\n",
+    "karabo_id_control = \"SPB_IRU_AGIPD1M1\" # karabo-id for control device\n",
     "\n",
     "use_dir_creation_date = True # use the creation data of the input dir for database queries\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8045\" # the database interface to use\n",
     "cal_db_timeout = 30000 # in milli seconds\n",
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
@@ -39,7 +39,7 @@
     "d0_lim = [10, 70] # hard limits for d0 value (distance between noise and first peak)\n",
     "gain_lim = [0.80, 1.2] # Threshold on gain in relative number. Contribute to BadPixel bit \"Gain_deviation\"\n",
     "\n",
-    "cell_range = [1,5] # range of cell to be considered, [0,0] for all\n",
+    "cell_range = [0,352] # range of cell to be considered, [0,0] for all\n",
     "pixel_range = [0,0,512,128] # range of pixels x1,y1,x2,y2 to consider [0,0,512,128] for all\n",
     "n_peaks_fit = 4 # Number of gaussian peaks to fit including noise peak\n",
     "\n",
@@ -561,17 +561,16 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def get_trains_data(run_folder, source, include, tid=None):\n",
+    "def get_trains_data(run_data, source, include, tid=None):\n",
     "    \"\"\"\n",
     "    Load single train for all module\n",
     "    \n",
-    "    :param run_folder: Path to folder with data\n",
+    "    :param run_data: Corrcted data to be loaded from\n",
     "    :param source: Data source to be loaded\n",
     "    :param include: Inset of file name to be considered \n",
     "    :param tid: Train Id to be loaded. First train is considered if None is given\n",
     "    \n",
     "    \"\"\"\n",
-    "    run_data = RunDirectory(run_folder, include)\n",
     "    if tid:\n",
     "        tid, data = run_data.select('*/DET/*', source).train_from_id(tid)\n",
     "        return tid, stack_detector_data(data, source, modules=nmods)\n",
@@ -580,9 +579,9 @@
     "            return tid, stack_detector_data(data, source, modules=nmods)\n",
     "    return None, None\n",
     "\n",
-    "\n",
     "include = '*S00000*'\n",
-    "tid, orig = get_trains_data(f'{proc_folder}/r{run:04d}/', 'image.data', include)\n",
+    "run_data = RunDirectory(f'{proc_folder}/r{run:04d}/', include)\n",
+    "tid, orig = get_trains_data(run_data, 'image.data', include)\n",
     "orig = orig[cell_range[0]:cell_range[1], ...]"
    ]
   },
@@ -594,10 +593,19 @@
    "source": [
     "# FIXME: mask bad pixels from median\n",
     "# mask = const_data['BadPixelsFF']\n",
-    "\n",
     "corrections = const_data['slopesFF'] # (16,shape[0],512,128) shape[0]= cell_range[1]-cell_range[0] /\n",
     "corrections = np.moveaxis(corrections, 1, 0) # (shape[0],16,512,128)\n",
     "rel_corr = corrections/np.nanmedian(corrections)\n",
+    "\n",
+    "# this is needed if LitFrame is enabled in DAQ to avoid shape mismatch \n",
+    "# and correction of the right cells\n",
+    "if np.diff(cell_range)[0] == mem_cells:\n",
+    "    sel = run_data.select(f'{karabo_id}/DET/0CH0:xtdf', 'image.cellId')\n",
+    "    _, cell = sel.train_from_index(0)\n",
+    "    stacked_cells = stack_detector_data(cell, 'image.cellId')[:, 0]\n",
+    "    \n",
+    "    rel_corr = rel_corr[stacked_cells[0]:stacked_cells[-1]+1]\n",
+    "\n",
     "corrected = orig / rel_corr"
    ]
   },
@@ -817,6 +825,23 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.8.11"
+  },
+  "latex_envs": {
+   "LaTeX_envs_menu_present": true,
+   "autocomplete": true,
+   "bibliofile": "biblio.bib",
+   "cite_by": "apalike",
+   "current_citInitial": 1,
+   "eqLabelWithNumbers": true,
+   "eqNumInitial": 1,
+   "hotkeys": {
+    "equation": "Ctrl-E",
+    "itemize": "Ctrl-I"
+   },
+   "labels_anchors": false,
+   "latex_user_defs": false,
+   "report_style_numbering": false,
+   "user_envs_cfg": false
   }
  },
  "nbformat": 4,
diff --git a/notebooks/AGIPD/Chracterize_AGIPD_Gain_PC_NBC.ipynb b/notebooks/AGIPD/Chracterize_AGIPD_Gain_PC_NBC.ipynb
index e739991f81f64a6e20b455700b2f4f1a5674e899..672e8e9bc089a6abf787658d52d2e70e3624c006 100644
--- a/notebooks/AGIPD/Chracterize_AGIPD_Gain_PC_NBC.ipynb
+++ b/notebooks/AGIPD/Chracterize_AGIPD_Gain_PC_NBC.ipynb
@@ -49,7 +49,7 @@
     "\n",
     "use_dir_creation_date = True\n",
     "delta_time = 0 # offset to the creation time (e.g. useful in case we want to force the system to use diff. dark constants)\n",
-    "cal_db_interface = \"tcp://max-exfl016:8019\"  # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8019\"  # the database interface to use\n",
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
     "\n",
diff --git a/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb b/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb
index 1e10696d04d144b8ef84f75c2e2d51763f4a232f..93986695ae12119773cd6437b5be5982209670ed 100644
--- a/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb
+++ b/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb
@@ -31,7 +31,7 @@
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
     "\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # the database interface to use\n",
     "cal_db_timeout = 3000000 # timeout on caldb requests\"\n",
     "\n",
     "instrument = \"SPB\"\n",
diff --git a/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb b/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
index eb1807e2a01a34635b15f6a2fe70201ece96dd60..bc2e17645575faf9cc2986a0ae7375b9c378e114 100644
--- a/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
+++ b/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
@@ -40,7 +40,7 @@
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
     "bias_voltage = 300 # detector bias voltage\n",
-    "cal_db_interface = \"tcp://max-exfl016:8026#8035\"  # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8026#8035\"  # the database interface to use\n",
     "mem_cells = 0  # number of memory cells used\n",
     "interlaced = False # assume interlaced data format, for data prior to Dec. 2017\n",
     "fit_hook = True # fit a hook function to medium gain slope\n",
diff --git a/notebooks/AGIPD/playground/Chracterize_AGIPD_Gain_PC_mlearn.ipynb b/notebooks/AGIPD/playground/Chracterize_AGIPD_Gain_PC_mlearn.ipynb
index 22e9fb728eb75775b49bbaa21917a96ce8d46826..78e4d3189760b8babe22285b8042372a66651e03 100644
--- a/notebooks/AGIPD/playground/Chracterize_AGIPD_Gain_PC_mlearn.ipynb
+++ b/notebooks/AGIPD/playground/Chracterize_AGIPD_Gain_PC_mlearn.ipynb
@@ -42,7 +42,7 @@
     "local_output = True # output constants locally\n",
     "db_output = True # output constants to database\n",
     "bias_voltage = 300 # detector bias voltage\n",
-    "cal_db_interface = \"tcp://max-exfl016:8019\"  # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8019\"  # the database interface to use\n",
     "mem_cells = 176  # number of memory cells used\n",
     "interlaced = False # assume interlaced data format, for data prior to Dec. 2017\n",
     "fit_hook = True # fit a hook function to medium gain slope\n",
diff --git a/notebooks/AGIPD/playground/Investigate_Baseline_Drift.ipynb b/notebooks/AGIPD/playground/Investigate_Baseline_Drift.ipynb
index 5d944cca8be5ef31394edfb504c8806297fd3ebc..74a12622b20677feec2596266e2b03f2c8847bd1 100644
--- a/notebooks/AGIPD/playground/Investigate_Baseline_Drift.ipynb
+++ b/notebooks/AGIPD/playground/Investigate_Baseline_Drift.ipynb
@@ -24,7 +24,7 @@
     "cells = [2] #list(range(1,32,8))\n",
     "max_cells_db = 176\n",
     "bias_voltage = 300\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\"\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\"\n",
     "photon_energy = 9.2"
    ]
   },
diff --git a/notebooks/AGIPD/playground/QuickCorrect-SingleModule.ipynb b/notebooks/AGIPD/playground/QuickCorrect-SingleModule.ipynb
index 2290cf45ba25349580a31a92118f4dba49a9e506..6bcd849a4e7e887f4376bffaa1b8c3ba1ec3e123 100644
--- a/notebooks/AGIPD/playground/QuickCorrect-SingleModule.ipynb
+++ b/notebooks/AGIPD/playground/QuickCorrect-SingleModule.ipynb
@@ -19,7 +19,7 @@
     "max_pulses = 120\n",
     "local_input = False\n",
     "bias_voltage = 300\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # the database interface to use\n",
     "use_dir_creation_date = False # use the creation data of the input dir for database queries\n",
     "photon_energy = 9.2 # photon energy in keV\n",
     "index_v = 2 # version of RAW index type\n",
diff --git a/notebooks/DSSC/Characterize_DSSC_Darks_NBC.ipynb b/notebooks/DSSC/Characterize_DSSC_Darks_NBC.ipynb
index b065ef7681ce6de7261c389e3a0691e7dce1875c..b4e01c190dd99c40c08943d02181e7e5c47d36d6 100644
--- a/notebooks/DSSC/Characterize_DSSC_Darks_NBC.ipynb
+++ b/notebooks/DSSC/Characterize_DSSC_Darks_NBC.ipynb
@@ -36,7 +36,7 @@
     "slow_data_pattern = 'RAW-R{}-DA{}-S00000.h5'\n",
     "\n",
     "use_dir_creation_date = True # use the dir creation date for determining the creation time\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # the database interface to use\n",
     "cal_db_timeout = 3000000 # timeout on caldb requests\"\n",
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
@@ -54,7 +54,7 @@
     "offset_numpy_algorithm = \"mean\"\n",
     "\n",
     "high_res_badpix_3d = False # set this to True if you need high-resolution 3d bad pixel plots. Runtime: ~ 1h\n",
-    "slow_data_aggregators = [1,2,3,4]  # quadrant/aggregator\n",
+    "slow_data_aggregators = [1,1,1,1]  # quadrant/aggregator\n",
     "slow_data_path = 'SQS_NQS_DSSC/FPGA/PPT_Q'\n",
     "operation_mode = ''  # Detector operation mode, optional"
    ]
@@ -210,18 +210,7 @@
     "    import h5py\n",
     "    import numpy as np\n",
     "    from cal_tools.enums import BadPixels\n",
-    " \n",
-    "    def get_num_cells(fname, h5path):\n",
-    "        with h5py.File(fname, \"r\") as f:\n",
-    "\n",
-    "            cells = f[f\"{h5path}/cellId\"][()]\n",
-    "            if cells == []:\n",
-    "                return\n",
-    "            maxcell = np.max(cells)\n",
-    "            options = [100, 200, 400, 500, 600, 700, 800]\n",
-    "            dists = np.array([(o-maxcell) for o in options])\n",
-    "            dists[dists<0] = 10000 # assure to always go higher\n",
-    "            return options[np.argmin(dists)]\n",
+    "    from cal_tools.dssclib import get_num_cells\n",
     "    \n",
     "    filename, channel = inp\n",
     "    \n",
@@ -491,11 +480,15 @@
     "                                             acquisition_rate=opfreq, \n",
     "                                             target_gain=targetgain,\n",
     "                                             encoded_gain=encodedgain)\n",
-    "            \n",
+    "            for parm in condition.parameters:\n",
+    "                if parm.name == \"Memory cells\":\n",
+    "                    parm.lower_deviation = max_cells\n",
+    "                    parm.upper_deviation = 0\n",
+    "\n",
     "            if db_output:\n",
     "                md = send_to_db(db_module, karabo_id, dconst, condition, file_loc, report,\n",
     "                                cal_db_interface, creation_time=creation_time, timeout=cal_db_timeout)\n",
-    "                \n",
+    "\n",
     "            if local_output and dont_use_pulseIds: # Don't save constant localy two times.\n",
     "                md = save_const_to_h5(db_module, karabo_id, dconst, condition,\n",
     "                                      dconst.data, file_loc, report,\n",
diff --git a/notebooks/DSSC/DSSC_Correct_and_Verify.ipynb b/notebooks/DSSC/DSSC_Correct_and_Verify.ipynb
index b29f2e2d93e5ac08ed6b83afff63b2272f3ea237..88277c1449194bf6fdfd58d24d43e604879f92ea 100644
--- a/notebooks/DSSC/DSSC_Correct_and_Verify.ipynb
+++ b/notebooks/DSSC/DSSC_Correct_and_Verify.ipynb
@@ -38,7 +38,7 @@
     "slow_data_pattern = 'RAW-R{}-DA{}-S00000.h5'\n",
     "\n",
     "use_dir_creation_date = True # use the creation data of the input dir for database queries\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020#8025\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020#8025\" # the database interface to use\n",
     "cal_db_timeout = 300000 # in milli seconds\n",
     "\n",
     "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
@@ -238,7 +238,11 @@
     "\n",
     "    import h5py\n",
     "    import numpy as np\n",
-    "    from cal_tools.dssclib import get_dssc_ctrl_data, get_pulseid_checksum\n",
+    "    from cal_tools.dssclib import (\n",
+    "        get_dssc_ctrl_data,\n",
+    "        get_num_cells,\n",
+    "        get_pulseid_checksum,\n",
+    "    )\n",
     "    from cal_tools.enums import BadPixels\n",
     "    from cal_tools.tools import get_constant_from_db_and_time\n",
     "    from iCalibrationDB import (\n",
@@ -267,15 +271,6 @@
     "    pulse_edges = None\n",
     "    err = None\n",
     "    offset_not_found = False\n",
-    "    def get_num_cells(fname, h5path):\n",
-    "        with h5py.File(fname, \"r\") as f:\n",
-    "\n",
-    "            cells = f[f\"{h5path}/cellId\"][()]\n",
-    "            maxcell = np.max(cells)\n",
-    "            options = [100, 200, 400, 500, 600, 700, 800]\n",
-    "            dists = np.array([(o-maxcell) for o in options])\n",
-    "            dists[dists<0] = 10000 # assure to always go higher\n",
-    "            return options[np.argmin(dists)]\n",
     "        \n",
     "    if mem_cells == 0:\n",
     "        mem_cells = get_num_cells(filename, h5path)\n",
diff --git a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
index 34e718eb421e68ddcfac313a8a8bd10d1f34a4ab..a1c9bb5d7b26ffb9f298e9c7d41bebbb43f5e225 100644
--- a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
+++ b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
@@ -35,7 +35,7 @@
     "h5path_cntrl = '/RUN/{}/DET/FCCD'  # path to find control data\n",
     "\n",
     "use_dir_creation_date = True # To be used to retrieve calibration constants later on (for database time derivation)\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # the calibration database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # the calibration database interface to use\n",
     "cal_db_timeout = 300000 # timeout on calibration database requests\n",
     "db_output = False # Output constants to the calibration database\n",
     "local_output = True # output constants locally\n",
diff --git a/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb b/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
index 6be6f7d1776bba412998ff0fa9824f7a113f14aa..27081b0418bf03827aed3268dc22f4238f72c8b9 100644
--- a/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
+++ b/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
@@ -37,7 +37,7 @@
     "h5path_cntrl = '/RUN/{}/DET/FCCD'  # path to control data\n",
     "\n",
     "use_dir_creation_date = True # use dir creation data for calDB queries\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8025\" # calibration DB interface to use\n",
     "cal_db_timeout = 300000000 # timeout on caldb requests\n",
     "\n",
     "\n",
diff --git a/notebooks/Gotthard2/Characterize_Darks_Gotthard2_NBC.ipynb b/notebooks/Gotthard2/Characterize_Darks_Gotthard2_NBC.ipynb
index 0007b09ba8af3b9577fc359daae5cb908cab85f3..ee6e5c160964ce530ecc99fae80ff6b7ea362039 100644
--- a/notebooks/Gotthard2/Characterize_Darks_Gotthard2_NBC.ipynb
+++ b/notebooks/Gotthard2/Characterize_Darks_Gotthard2_NBC.ipynb
@@ -39,7 +39,7 @@
     "\n",
     "# Parameters for the calibration database.\n",
     "use_dir_creation_date = True\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\"  # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\"  # calibration DB interface to use\n",
     "cal_db_timeout = 300000  # timeout on caldb requests\n",
     "overwrite_creation_time = \"\"  # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.00 e.g. \"2022-06-28 13:00:00.00\"\n",
     "db_output = False  # Output constants to the calibration database\n",
diff --git a/notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb b/notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb
index 157dac917a944416dc41368d73dc0a14ca892a76..134e7e0bb729e7faf6c1509ea6d65c46156709d2 100644
--- a/notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb
+++ b/notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb
@@ -36,7 +36,7 @@
     "karabo_id_control = \"\"  # Control karabo ID. Set to empty string to use the karabo-id\n",
     "\n",
     "# Parameters for calibration database.\n",
-    "cal_db_interface = \"tcp://max-exfl016:8016#8025\"  # the database interface to use.\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8016#8025\"  # the database interface to use.\n",
     "cal_db_timeout = 180000  # timeout on caldb requests.\n",
     "creation_time = \"\"  # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC e.g. \"2022-06-28 13:00:00\"\n",
     "\n",
@@ -88,7 +88,7 @@
     "from cal_tools.step_timing import StepTimer\n",
     "from cal_tools.tools import (\n",
     "    calcat_creation_time,\n",
-    "    CalibrationMetadata,\n",
+    "    write_constants_fragment,\n",
     ")\n",
     "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
     "\n",
@@ -109,10 +109,6 @@
     "out_folder = Path(out_folder)\n",
     "out_folder.mkdir(parents=True, exist_ok=True)\n",
     "\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# NOTE: this notebook will not overwrite calibration metadata file\n",
-    "const_yaml = metadata.get(\"retrieved-constants\", {})\n",
-    "\n",
     "if not karabo_id_control:\n",
     "    karabo_id_control = karabo_id\n",
     "\n",
@@ -242,28 +238,14 @@
     "            const_data[mod][\"RelativeGainGotthard2\"] = cfile[\"gain_map\"][()].astype(np.float32)\n",
     "            const_data[mod][\"Mask\"] = cfile[\"bpix_ff\"][()].astype(np.uint32)\n",
     "else:\n",
-    "    if const_yaml:\n",
-    "        const_data = dict()\n",
-    "        for mod in karabo_da:\n",
-    "            const_data[mod] = dict()\n",
-    "            for cname, mdata in const_yaml[mod][\"constants\"].items():\n",
-    "                const_data[mod][cname] = dict()\n",
-    "                if mdata[\"creation-time\"]:\n",
-    "                    with h5py.File(mdata[\"path\"], \"r\") as cf:\n",
-    "                        const_data[mod][cname] = np.copy(\n",
-    "                            cf[f\"{mdata['dataset']}/data\"])\n",
-    "    else:\n",
-    "        mdata_dict = {\"constants\": dict()}\n",
-    "\n",
-    "        constant_names = [\"LUTGotthard2\", \"OffsetGotthard2\", \"BadPixelsDarkGotthard2\"]\n",
-    "        if gain_correction:\n",
-    "            constant_names += [\"RelativeGainGotthard2\", \"BadPixelsFFGotthard2\"]\n",
-    "\n",
-    "        # Retrieve metadata for all pnccd constants.\n",
-    "        const_data = g2_cal.ndarray_map(constant_names)\n",
+    "    constant_names = [\"LUTGotthard2\", \"OffsetGotthard2\", \"BadPixelsDarkGotthard2\"]\n",
+    "    if gain_correction:\n",
+    "        constant_names += [\"RelativeGainGotthard2\", \"BadPixelsFFGotthard2\"]\n",
+    "\n",
+    "    g2_metadata = g2_cal.metadata(calibrations=constant_names)\n",
     "\n",
     "    # Validate the constants availability and raise/warn correspondingly.\n",
-    "    for mod, calibrations in const_data.items():\n",
+    "    for mod, calibrations in g2_metadata.items():\n",
     "\n",
     "        dark_constants = {\"LUTGotthard2\"}\n",
     "        if offset_correction:\n",
@@ -277,32 +259,50 @@
     "        missing_gain_constants = {\n",
     "            \"BadPixelsFFGotthard2\", \"RelativeGainGotthard2\"} - set(calibrations)\n",
     "        if gain_correction and missing_gain_constants:\n",
-    "            warning(f\"Gain constants {missing_gain_constants} are not retrieved for mod {mod}.\"\n",
-    "                    \"Gain correction is disabled for this module\")\n",
-    "\n",
-    "        # Create the mask array.\n",
-    "        bpix = const_data[mod].get(\"BadPixelsDarkGotthard2\")\n",
-    "        if bpix is None:\n",
-    "            bpix = np.zeros((1280, 2, 3), dtype=np.uint32)\n",
-    "        if const_data[mod].get(\"BadPixelsFFGotthard2\") is not None:\n",
-    "            bpix |= const_data[mod][\"BadPixelsFFGotthard2\"]\n",
-    "        const_data[mod][\"Mask\"] = bpix\n",
-    "\n",
-    "        # Prepare empty arrays for missing constants.\n",
-    "        if const_data[mod].get(\"OffsetGotthard2\") is None:\n",
-    "            const_data[mod][\"OffsetGotthard2\"] = np.zeros(\n",
-    "                (1280, 2, 3), dtype=np.float32)\n",
-    "\n",
-    "        if const_data[mod].get(\"RelativeGainGotthard2\") is None:\n",
-    "            const_data[mod][\"RelativeGainGotthard2\"] = np.ones(\n",
-    "                (1280, 2, 3), dtype=np.float32)\n",
-    "        const_data[mod][\"RelativeGainGotthard2\"] = const_data[mod][\"RelativeGainGotthard2\"].astype(  # noqa\n",
-    "            np.float32, copy=False)  # Old gain constants are not float32.\n",
+    "            warning(f\"Gain constants {missing_gain_constants} are not retrieved for mod {mod}.\")\n",
     "\n",
     "if not karabo_da:\n",
     "    raise ValueError(\"Dark constants are not available for all modules.\")"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ac1cdec5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Record constant details in YAML metadata.\n",
+    "write_constants_fragment(\n",
+    "    out_folder=(metadata_folder or out_folder),\n",
+    "    det_metadata=g2_metadata,\n",
+    "    caldb_root=g2_cal.caldb_root)\n",
+    "\n",
+    "# Load constants data for all constants.\n",
+    "const_data = g2_cal.ndarray_map(metadata=g2_metadata)\n",
+    "\n",
+    "# Prepare constant arrays.\n",
+    "if not constants_file:\n",
+    "    # Create the mask array.\n",
+    "    bpix = const_data[mod].get(\"BadPixelsDarkGotthard2\")\n",
+    "    if bpix is None:\n",
+    "        bpix = np.zeros((1280, 2, 3), dtype=np.uint32)\n",
+    "    if const_data[mod].get(\"BadPixelsFFGotthard2\") is not None:\n",
+    "        bpix |= const_data[mod][\"BadPixelsFFGotthard2\"]\n",
+    "    const_data[mod][\"Mask\"] = bpix\n",
+    "\n",
+    "    # Prepare empty arrays for missing constants.\n",
+    "    if const_data[mod].get(\"OffsetGotthard2\") is None:\n",
+    "        const_data[mod][\"OffsetGotthard2\"] = np.zeros(\n",
+    "            (1280, 2, 3), dtype=np.float32)\n",
+    "\n",
+    "    if const_data[mod].get(\"RelativeGainGotthard2\") is None:\n",
+    "        const_data[mod][\"RelativeGainGotthard2\"] = np.ones(\n",
+    "            (1280, 2, 3), dtype=np.float32)\n",
+    "    const_data[mod][\"RelativeGainGotthard2\"] = const_data[mod][\"RelativeGainGotthard2\"].astype(  # noqa\n",
+    "        np.float32, copy=False)  # Old gain constants are not float32."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
diff --git a/notebooks/Gotthard2/Gotthard2_retrieve_constants_precorrection_NBC.ipynb b/notebooks/Gotthard2/Gotthard2_retrieve_constants_precorrection_NBC.ipynb
deleted file mode 100644
index 679bc8506b03f887e077be4993ae63aeee29b0c3..0000000000000000000000000000000000000000
--- a/notebooks/Gotthard2/Gotthard2_retrieve_constants_precorrection_NBC.ipynb
+++ /dev/null
@@ -1,249 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# GOTTHARD2 Retrieving Constants Pre-correction #\n",
-    "\n",
-    "Author: European XFEL Detector Group, Version: 1.0\n",
-    "\n",
-    "Retrieving Required Constants for Offline Calibration of the Gotthard2 Detector"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "in_folder = \"/gpfs/exfel/exp/FXE/202221/p003225/raw\"  # the folder to read data from, required\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/gotthard2\"  # the folder to output to, required\n",
-    "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
-    "run = 50  # run to process, required\n",
-    "\n",
-    "# Parameters used to access raw data.\n",
-    "karabo_id = \"FXE_XAD_G2XES\"  # karabo prefix of Gotthard-II devices\n",
-    "karabo_da = [\"GH201\"]  # data aggregators\n",
-    "receiver_template = \"RECEIVER\"  # receiver template used to read INSTRUMENT keys.\n",
-    "control_template = \"CONTROL\"  # control template used to read CONTROL keys.\n",
-    "instrument_source_template = \"{}/DET/{}:daqOutput\"  # template for source name (filled with karabo_id & receiver_id). e.g. 'SPB_IRDA_JF4M/DET/JNGFR01:daqOutput'\n",
-    "ctrl_source_template = \"{}/DET/{}\"  # template for control source name (filled with karabo_id_control)\n",
-    "karabo_id_control = \"\"  # Control karabo ID. Set to empty string to use the karabo-id\n",
-    "\n",
-    "# Parameters for calibration database.\n",
-    "cal_db_interface = \"tcp://max-exfl016:8017#8025\"  # the database interface to use.\n",
-    "cal_db_timeout = 180000  # timeout on caldb requests.\n",
-    "creation_time = \"\"  # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC e.g. \"2022-06-28 13:00:00\"\n",
-    "\n",
-    "# Parameters affecting corrected data.\n",
-    "constants_file = \"\"  # /gpfs/exfel/data/scratch/ahmedk/dont_remove/gotthard2/constants/calibration_constants_GH2.h5\"  # Retrieve constants from local.\n",
-    "offset_correction = True  # apply offset correction. This can be disabled to only apply LUT or apply LUT and gain correction for non-linear differential results.\n",
-    "gain_correction = True  # apply gain correction.\n",
-    "\n",
-    "# Parameter conditions.\n",
-    "bias_voltage = -1  # Detector bias voltage, set to -1 to use value in raw file.\n",
-    "exposure_time = -1.  # Detector exposure time, set to -1 to use value in raw file.\n",
-    "exposure_period = -1.  # Detector exposure period, set to -1 to use value in raw file.\n",
-    "acquisition_rate = -1.  # Detector acquisition rate (1.1/4.5), set to -1 to use value in raw file.\n",
-    "single_photon = -1  # Detector single photon mode (High/Low CDS), set to -1 to use value in raw file.\n",
-    "\n",
-    "if constants_file:\n",
-    "    print(\"Skipping constant retrieval. Specified constants_file is used.\")\n",
-    "    import sys\n",
-    "\n",
-    "    sys.exit(0)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from logging import warning\n",
-    "\n",
-    "from extra_data import RunDirectory\n",
-    "from pathlib import Path\n",
-    "\n",
-    "import cal_tools.restful_config as rest_cfg\n",
-    "from cal_tools.calcat_interface import GOTTHARD2_CalibrationData\n",
-    "from cal_tools.gotthard2 import gotthard2lib\n",
-    "from cal_tools.tools import (\n",
-    "    calcat_creation_time,\n",
-    "    CalibrationMetadata,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "in_folder = Path(in_folder)\n",
-    "run_folder = in_folder / f\"r{run:04d}\"\n",
-    "out_folder = Path(out_folder)\n",
-    "out_folder.mkdir(parents=True, exist_ok=True)\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# Constant paths are saved under retrieved-constants in calibration_metadata.yml\n",
-    "retrieved_constants = metadata.setdefault(\"retrieved-constants\", {})\n",
-    "\n",
-    "if not karabo_id_control:\n",
-    "    karabo_id_control = karabo_id\n",
-    "\n",
-    "instrument_src = instrument_source_template.format(karabo_id, receiver_template)\n",
-    "ctrl_src = ctrl_source_template.format(karabo_id_control, control_template)\n",
-    "\n",
-    "print(f\"Retrieve constants for modules: {karabo_da} for run {run}\")\n",
-    "\n",
-    "# Run's creation time:\n",
-    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
-    "print(f\"Creation time: {creation_time}\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Read slow data\n",
-    "run_dc = RunDirectory(run_folder)\n",
-    "g2ctrl = gotthard2lib.Gotthard2Ctrl(run_dc=run_dc, ctrl_src=ctrl_src)\n",
-    "\n",
-    "if bias_voltage == -1:\n",
-    "    bias_voltage = g2ctrl.get_bias_voltage()\n",
-    "if exposure_time == -1:\n",
-    "    exposure_time = g2ctrl.get_exposure_time()\n",
-    "if exposure_period == -1:\n",
-    "    exposure_period = g2ctrl.get_exposure_period()\n",
-    "if acquisition_rate == -1:\n",
-    "    acquisition_rate = g2ctrl.get_acquisition_rate()\n",
-    "if single_photon == -1:\n",
-    "    single_photon = g2ctrl.get_single_photon()\n",
-    "\n",
-    "print(\"Bias Voltage:\", bias_voltage)\n",
-    "print(\"Exposure Time:\", exposure_time)\n",
-    "print(\"Exposure Period:\", exposure_period)\n",
-    "print(\"Acquisition Rate:\", acquisition_rate)\n",
-    "print(\"Single Photon:\", single_photon)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "g2_cal = GOTTHARD2_CalibrationData(\n",
-    "    detector_name=karabo_id,\n",
-    "    sensor_bias_voltage=bias_voltage,\n",
-    "    exposure_time=exposure_time,\n",
-    "    exposure_period=exposure_period,\n",
-    "    acquisition_rate=acquisition_rate,\n",
-    "    single_photon=single_photon,\n",
-    "    event_at=creation_time,\n",
-    "    client=rest_cfg.calibration_client(),\n",
-    ")\n",
-    "\n",
-    "mdata_dict = {\"constants\": dict()}\n",
-    "\n",
-    "constant_names = [\"LUTGotthard2\", \"OffsetGotthard2\", \"BadPixelsDarkGotthard2\"]\n",
-    "if gain_correction:\n",
-    "    constant_names += [\"RelativeGainGotthard2\", \"BadPixelsFFGotthard2\"]\n",
-    "\n",
-    "# Retrieve metadata for all pnccd constants.\n",
-    "g2_metadata = g2_cal.metadata(constant_names)\n",
-    "\n",
-    "missing_dark_modules = set()\n",
-    "# Validate the constants availability and raise/warn correspondingly.\n",
-    "for mod, ccv_dict in g2_metadata.items():\n",
-    "\n",
-    "    dark_constants = {\"LUTGotthard2\"}\n",
-    "    if offset_correction:\n",
-    "        dark_constants |= {\"OffsetGotthard2\", \"BadPixelsDarkGotthard2\"}\n",
-    "    missing_dark_constants = dark_constants - set(ccv_dict)\n",
-    "\n",
-    "    if missing_dark_constants:\n",
-    "        warning(f\"Dark constants {missing_dark_constants} are not available to correct {mod}\")\n",
-    "        missing_dark_modules.add(mod)\n",
-    "\n",
-    "    missing_gain_constants = {\"BadPixelsFFGotthard2\", \"RelativeGainGotthard2\"} - set(ccv_dict)\n",
-    "    if gain_correction and missing_gain_constants:\n",
-    "        warning(f\"Gain constants {missing_gain_constants} are not retrieved for {mod}\")\n",
-    "\n",
-    "if missing_dark_modules == set(karabo_da):\n",
-    "    raise ValueError(f\"{missing_dark_constants} constants are not available for all modules.\")\n",
-    "\n",
-    "# Add constants metadata in retrieved_constants dict.\n",
-    "for mod, ccv_dict in g2_metadata.items():\n",
-    "    mod_dict = retrieved_constants.setdefault(mod, dict())\n",
-    "    const_dict = mod_dict.setdefault(\"constants\", dict())\n",
-    "    for cname, ccv_metadata in ccv_dict.items():\n",
-    "        const_dict[cname] = {\n",
-    "                \"path\": str(g2_cal.caldb_root / ccv_metadata[\"path\"]),\n",
-    "                \"dataset\": ccv_metadata[\"dataset\"],\n",
-    "                \"creation-time\": ccv_metadata[\"begin_validity_at\"],\n",
-    "                \"ccv_id\": ccv_metadata[\"ccv_id\"],\n",
-    "            }\n",
-    "    mod_dict[\"physical-name\"] = ccv_metadata[\"physical_name\"]\n",
-    "    \n",
-    "print(f\"Stored retrieved constants in {metadata.filename}\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "timestamps = dict()\n",
-    "\n",
-    "for mod in karabo_da:\n",
-    "    module_timestamps = timestamps[mod] = dict()\n",
-    "    module_constants = retrieved_constants[mod]\n",
-    "    print(f\"Module: {mod}:\")\n",
-    "    for cname, mdata in module_constants[\"constants\"].items():\n",
-    "        print(f'{cname:.<12s}', mdata[\"creation-time\"])\n",
-    "\n",
-    "    for cname in [\"OffsetGotthard2\", \"BadPixelsDarkGotthard2\", \"RelativeGainGotthard2\", \"BadPixelsFFGotthard2\"]:\n",
-    "        if cname in module_constants[\"constants\"]:\n",
-    "            module_timestamps[cname] = module_constants[\"constants\"][cname][\"creation-time\"]\n",
-    "        else:\n",
-    "            module_timestamps[cname] = \"NA\"\n",
-    "\n",
-    "retrieved_constants[\"time-summary\"] = timestamps\n",
-    "\n",
-    "metadata.save()"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.8.11 ('.cal4_venv')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.11"
-  },
-  "orig_nbformat": 4,
-  "vscode": {
-   "interpreter": {
-    "hash": "ccde353e8822f411c1c49844e1cbe3edf63293a69efd975d1b44f5e852832668"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
index 928f120d8e8c24f527558a6da8dc96d88c6c3224..2cbf2883903625f971734edb684e66e5c8011853 100644
--- a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
@@ -17,41 +17,42 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/SPB/202130/p900204/raw\"  # the folder to read data from, required\n",
-    "out_folder =  \"/gpfs/exfel/data/scratch/ahmedk/test/remove\"  # the folder to output to, required\n",
-    "run = 91  # run to process, required\n",
+    "in_folder = \"/gpfs/exfel/exp/HED/202331/p900360/raw\"  # the folder to read data from, required\n",
+    "out_folder =  \"/gpfs/exfel/data/scratch/ahmedk/test/remove/jungfrau\"  # the folder to output to, required\n",
+    "run = 20  # run to process, required\n",
     "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
     "sequences = [-1]  # sequences to correct, set to [-1] for all, range allowed\n",
     "sequences_per_node = 1  # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n",
     "\n",
     "# Parameters used to access raw data.\n",
-    "karabo_id = \"SPB_IRDA_JF4M\"  # karabo prefix of Jungfrau devices\n",
-    "karabo_da = ['JNGFR01', 'JNGFR02', 'JNGFR03', 'JNGFR04', 'JNGFR05', 'JNGFR06', 'JNGFR07', 'JNGFR08']  # data aggregators\n",
+    "karabo_id = \"HED_IA1_JF500K4\"  # karabo prefix of Jungfrau devices\n",
+    "karabo_da = ['JNGFR04']  # data aggregators\n",
     "receiver_template = \"JNGFR{:02d}\"  # Detector receiver template for accessing raw data files. e.g. \"JNGFR{:02d}\"\n",
     "instrument_source_template = '{}/DET/{}:daqOutput'  # template for source name (filled with karabo_id & receiver_id). e.g. 'SPB_IRDA_JF4M/DET/JNGFR01:daqOutput'\n",
     "ctrl_source_template = '{}/DET/CONTROL'  # template for control source name (filled with karabo_id_control)\n",
     "karabo_id_control = \"\"  # if control is on a different ID, set to empty string if it is the same a karabo-id\n",
     "\n",
     "# Parameters for calibration database.\n",
-    "use_dir_creation_date = True  # use the creation data of the input dir for database queries\n",
-    "cal_db_interface = \"tcp://max-exfl016:8017#8025\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8017#8025\" # the database interface to use\n",
     "cal_db_timeout = 180000  # timeout on caldb requests\n",
+    "creation_time = \"\"  # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC e.g. \"2022-06-28 13:00:00\"\n",
     "\n",
     "# Parameters affecting corrected data.\n",
     "relative_gain = True  # do relative gain correction.\n",
-    "strixel_sensor = False  # reordering for strixel detector layout.\n",
+    "strixel_sensor = \"\"  # reordering for strixel detector layout. Possible strixels to choose from are A0123 and A1256.\n",
     "strixel_double_norm = 2.0  # normalization to use for double-size pixels, only applied for strixel sensors.\n",
     "limit_trains = 0  # ONLY FOR TESTING. process only first N trains, Use 0 to process all.\n",
     "chunks_ids = 32  # HDF chunk size for memoryCell and frameNumber.\n",
     "chunks_data = 1  # HDF chunk size for pixel data in number of frames.\n",
+    "wrong_gain_pixels = [-1]  # List of 5 values (e.g. [4, 0, 255, 896, 1024]) defining the module number (4 for JNGFR04). And using the indexes of the FEM row [pixel_x_0:pixel_x_1] and column [pixel_y_0:pixel_y_1]. Set to -1 to not pick pixels for gain replacement.\n",
+    "replace_wrong_gain_value = 0  # Force gain value into the chosen gain [0, 1, or 2] for pixels specified in `wrong_gain_pixels`. This has no effect if wrong_gain_pixels = [-1]\n",
     "\n",
     "# Parameters for retrieving calibration constants\n",
-    "manual_slow_data = False  # if true, use manually entered bias_voltage, integration_time, gain_setting, and gain_mode values\n",
-    "integration_time = 4.96  # integration time in us, will be overwritten by value in file\n",
-    "gain_setting = 0  # 0 for dynamic gain, 1 for dynamic HG0, will be overwritten by value in file\n",
-    "gain_mode = 0  # 0 for runs with dynamic gain setting, 1 for fixgain. It will be overwritten by value in file, if manual_slow_data is set to True.\n",
+    "integration_time = -1  # integration time in us. set to -1 to overwrite by value in file.\n",
+    "gain_setting = -1  # 0 for dynamic gain, 1 for dynamic HG0. set to -1 to overwrite by value in file.\n",
+    "gain_mode = -1  # 0 for runs with dynamic gain setting, 1 for fixed gain. Set to -1 to overwrite by value in file.\n",
     "mem_cells = -1  # Set mem_cells to -1 to automatically use the value stored in RAW data.\n",
-    "bias_voltage = 180  # will be overwritten by value in file\n",
+    "bias_voltage = -1  # Bias Voltage. Set to -1 to overwrite by value in file.\n",
     "\n",
     "# Parameters for plotting\n",
     "skip_plots = False  # exit after writing corrected files\n",
@@ -60,6 +61,8 @@
     "\n",
     "# Parameters for ROI selection and reduction\n",
     "roi_definitions = [-1]  # List with groups of 6 values defining ROIs, e.g. [3, 120, 180, 200, 550, -2] for module 3 (JNGFR03), slice 120:180, 200:550, average along axis -2 (slow scan, or -1 for fast scan)\n",
+    "roi_threshold = 2.  # Corrected pixels below the threshold will be excluded from ROI projections. Set to -1 to include all pixels.\n",
+    "\n",
     "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n",
     "    from xfel_calibrate.calibrate import balance_sequences as bs\n",
@@ -76,34 +79,30 @@
     "import multiprocessing\n",
     "import sys\n",
     "import warnings\n",
-    "from functools import partial\n",
     "from logging import warning\n",
     "from pathlib import Path\n",
     "\n",
-    "import h5py\n",
     "import matplotlib\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
     "import pasha as psh\n",
     "import tabulate\n",
-    "from IPython.display import Latex, Markdown, display\n",
     "from extra_data import DataCollection, H5File, RunDirectory, by_id, components\n",
-    "from extra_geom import JUNGFRAUGeometry\n",
+    "from IPython.display import Latex, Markdown, display\n",
     "from matplotlib.colors import LogNorm\n",
     "\n",
-    "from cal_tools import h5_copy_except\n",
-    "from cal_tools.jungfraulib import JungfrauCtrl\n",
+    "import cal_tools.restful_config as rest_cfg\n",
+    "from cal_tools.calcat_interface import JUNGFRAU_CalibrationData\n",
     "from cal_tools.enums import BadPixels\n",
     "from cal_tools.files import DataFile\n",
+    "from cal_tools.jungfrau.jungfraulib import JungfrauCtrl\n",
+    "from cal_tools.plotting import init_jungfrau_geom\n",
     "from cal_tools.step_timing import StepTimer\n",
     "from cal_tools.tools import (\n",
-    "    get_constant_from_db_and_time,\n",
-    "    get_dir_creation_date,\n",
-    "    get_pdu_from_db,\n",
+    "    calcat_creation_time,\n",
     "    map_seq_files,\n",
-    "    CalibrationMetadata,\n",
+    "    write_constants_fragment,\n",
     ")\n",
-    "from iCalibrationDB import Conditions, Constants\n",
     "\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
@@ -127,12 +126,12 @@
     "\n",
     "print(f\"Run is: {run}\")\n",
     "print(f\"Instrument H5File source: {instrument_src}\")\n",
+    "karabo_da = sorted(karabo_da)\n",
     "print(f\"Process modules: {karabo_da}\")\n",
     "\n",
-    "creation_time = None\n",
-    "if use_dir_creation_date:\n",
-    "    creation_time = get_dir_creation_date(in_folder, run)\n",
-    "    print(f\"Using {creation_time} as creation time\")\n",
+    "# Run's creation time:\n",
+    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
+    "print(f\"Creation time: {creation_time}\")\n",
     "\n",
     "if karabo_id_control == \"\":\n",
     "    karabo_id_control = karabo_id\n",
@@ -143,42 +142,6 @@
     "    sys.exit(1)"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Read available sequence files to correct.\n",
-    "mapped_files, num_seq_files = map_seq_files(\n",
-    "    run_folder, karabo_da, sequences)\n",
-    "\n",
-    "if not len(mapped_files):\n",
-    "    raise IndexError(\n",
-    "        \"No sequence files available to correct for the selected sequences and karabo_da.\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(f\"Processing a total of {num_seq_files} sequence files\")\n",
-    "table = []\n",
-    "fi = 0\n",
-    "for kda, sfiles in mapped_files.items():\n",
-    "    for k, f in enumerate(sfiles):\n",
-    "        if k == 0:\n",
-    "            table.append((fi, kda, k, f))\n",
-    "        else:\n",
-    "            table.append((fi, \"\", k,  f))\n",
-    "        fi += 1\n",
-    "md = display(Latex(tabulate.tabulate(\n",
-    "    table, tablefmt='latex',\n",
-    "    headers=[\"#\", \"module\", \"# module\", \"file\"])))"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -192,23 +155,43 @@
     "    memory_cells, sc_start = ctrl_data.get_memory_cells()\n",
     "\n",
     "    mem_cells_name = \"single cell\" if memory_cells == 1 else \"burst\"\n",
+    "    print(f\"Number of memory cells are {memory_cells}\")\n",
     "    print(f\"Run is in {mem_cells_name} mode.\\nStorage cell start: {sc_start:02d}\")\n",
     "else:\n",
     "    memory_cells = mem_cells\n",
     "    mem_cells_name = \"single cell\" if memory_cells == 1 else \"burst\"\n",
     "    print(f\"Run is in manually set to {mem_cells_name} mode. With {memory_cells} memory cells\")\n",
     "\n",
-    "if not manual_slow_data:\n",
+    "if integration_time < 0:\n",
     "    integration_time = ctrl_data.get_integration_time()\n",
+    "    print(f\"Integration time is {integration_time} us\")\n",
+    "else:\n",
+    "    print(f\"Integration time is manually set to {integration_time} us\")\n",
+    "\n",
+    "if bias_voltage < 0:\n",
     "    bias_voltage = ctrl_data.get_bias_voltage()\n",
+    "    print(f\"Bias voltage is {bias_voltage} V\")\n",
+    "else:\n",
+    "    print(f\"Bias voltage is manually set to {bias_voltage} V.\")\n",
+    "\n",
+    "if gain_setting < 0:\n",
     "    gain_setting = ctrl_data.get_gain_setting()\n",
-    "    gain_mode = ctrl_data.get_gain_mode()\n",
+    "    print(f\"Gain setting is {gain_setting} (run settings: {ctrl_data.run_settings})\")\n",
+    "else:\n",
+    "    print(f\"Gain setting is manually set to {gain_setting}.\")\n",
     "\n",
-    "print(f\"Integration time is {integration_time} us\")\n",
-    "print(f\"Gain setting is {gain_setting} (run settings: {ctrl_data.run_settings})\")\n",
-    "print(f\"Gain mode is {gain_mode} ({ctrl_data.run_mode})\")\n",
-    "print(f\"Bias voltage is {bias_voltage} V\")\n",
-    "print(f\"Number of memory cells are {memory_cells}\")"
+    "force_fixed_gain_constants_flag = False\n",
+    "if gain_mode < 0:\n",
+    "    gain_mode = ctrl_data.get_gain_mode()\n",
+    "    print(f\"Gain mode is {gain_mode} ({ctrl_data.run_mode})\")\n",
+    "    # JF corrections in burst mode are only supported when no gain switching occurs.\n",
+    "    # Always retrieve fixed gain constant for burst mode.\n",
+    "    if gain_mode == 0 and memory_cells > 1:\n",
+    "        print(\"By default fixed gain constant will be retrieved for burst mode data,\"\n",
+    "            \" even for dynamic gain data.\")\n",
+    "        force_fixed_gain_constants_flag = True\n",
+    "else:\n",
+    "    print(f\"Gain mode is manually set to {gain_mode}.\")"
    ]
   },
   {
@@ -217,17 +200,111 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "if strixel_sensor:\n",
-    "    from cal_tools.jfstrixel import STRIXEL_SHAPE as strixel_frame_shape, double_pixel_indices, to_strixel\n",
-    "    Ydouble, Xdouble = double_pixel_indices()\n",
-    "    print('Strixel sensor transformation enabled')"
+    "def jungfrau_cal_mdata(gm):\n",
+    "    jf_cal = JUNGFRAU_CalibrationData(\n",
+    "        detector_name=karabo_id,\n",
+    "        sensor_bias_voltage=bias_voltage,\n",
+    "        event_at=creation_time,\n",
+    "        modules=karabo_da,\n",
+    "        memory_cells=memory_cells,\n",
+    "        integration_time=integration_time,\n",
+    "        gain_setting=gain_setting,\n",
+    "        gain_mode=gm,\n",
+    "        client=rest_cfg.calibration_client(),\n",
+    "    )\n",
+    "\n",
+    "    constant_names = [\"Offset10Hz\", \"BadPixelsDark10Hz\"]\n",
+    "    if relative_gain:\n",
+    "        constant_names += [\"BadPixelsFF10Hz\", \"RelativeGain10Hz\"]\n",
+    "    jf_metadata = jf_cal.metadata(calibrations=constant_names) \n",
+    "    # Display retrieved calibration constants timestamps\n",
+    "    jf_cal.display_markdown_retrieved_constants(metadata=jf_metadata)\n",
+    "    return jf_cal, jf_metadata\n",
+    "\n",
+    "def force_fixed_gain_constants():\n",
+    "    \"\"\"JF corrections in burst mode are only supported when\n",
+    "    no gain switching occurs. Always retrieve fixed gain\n",
+    "    constant for burst mode.\n",
+    "    https://git.xfel.eu/calibration/planning/-/issues/196\n",
+    "\n",
+    "    Returns:\n",
+    "        dict: The metadata with the jungfrau retrieved constants.\n",
+    "            {mod: {cname: ccv_metadata}}\n",
+    "    \"\"\"\n",
+    "    from datetime import datetime\n",
+    "\n",
+    "    from cal_tools.calcat_interface import CalCatError\n",
+    "\n",
+    "    try:\n",
+    "        jf_cal, jf_metadata = jungfrau_cal_mdata(gm=1)\n",
+    "    except CalCatError as e:\n",
+    "        warning(\n",
+    "            \"No fixed gain constants found. \"\n",
+    "            \"Looking for dynamic gain constant. \"\n",
+    "            f\"(CalCatError: {e}.\")\n",
+    "\n",
+    "    jf_cal, jf_metadata = jungfrau_cal_mdata(gm=0)\n",
+    "\n",
+    "    for mod, ccvs in jf_metadata.items():\n",
+    "        offset = ccvs.get(\"Offset10Hz\")\n",
+    "        if not offset:  # This module wont be corrected later after validating constants.\n",
+    "            continue\n",
+    "        time_difference = creation_time - datetime.fromisoformat(offset[\"begin_validity_at\"])\n",
+    "        if abs(time_difference.days) > 3:\n",
+    "            warning(\n",
+    "                f\"No dynamic gain constant retrieved for {mod} with at least\"\n",
+    "                \" 3 days time difference with the RAW data creation date.\"\n",
+    "                \" Please make sure there are available constants.\")\n",
+    "            jf_metadata[mod].pop(\"Offset10Hz\")\n",
+    "\n",
+    "    return jf_cal, jf_metadata"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Retrieving calibration constants ###"
+    "### Retrieving calibration constants"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "if force_fixed_gain_constants_flag:\n",
+    "    jf_cal, jf_metadata = force_fixed_gain_constants()\n",
+    "else:\n",
+    "    jf_cal, jf_metadata = jungfrau_cal_mdata(gain_mode)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Validate the constants availability and raise/warn correspondingly. \n",
+    "for mod in karabo_da[:]:\n",
+    "    calibrations = jf_metadata.get(mod, {})\n",
+    "\n",
+    "    missing_dark_constants = {\"Offset10Hz\", \"BadPixelsDark10Hz\"} - set(calibrations)\n",
+    "    missing_gain_constants = {\"BadPixelsFF10Hz\", \"RelativeGain10Hz\"} - set(calibrations)\n",
+    "\n",
+    "    if missing_dark_constants:\n",
+    "        warning(\n",
+    "            f\"Dark constants {missing_dark_constants} are not available to correct {mod}.\"\n",
+    "            f\" Module {mod} won't be corrected.\")\n",
+    "        karabo_da.remove(mod)\n",
+    "\n",
+    "    if relative_gain and missing_gain_constants:\n",
+    "        warning(f\"Gain constants {missing_gain_constants} were not retrieved for {mod}.\"\n",
+    "                \" No Relative gain correction for this module\")\n",
+    "if not karabo_da:  # Dark constants are missing for all modules.\n",
+    "    raise ValueError(\"Dark constants are missing for all modules.\")"
    ]
   },
   {
@@ -236,70 +313,45 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "condition = Conditions.Dark.jungfrau(\n",
-    "    memory_cells=memory_cells,\n",
-    "    bias_voltage=bias_voltage,\n",
-    "    integration_time=integration_time,\n",
-    "    gain_setting=gain_setting,\n",
-    "    gain_mode=gain_mode,\n",
-    ")\n",
+    "# Record constant details in YAML metadata\n",
+    "write_constants_fragment(\n",
+    "    out_folder=(metadata_folder or out_folder),\n",
+    "    det_metadata=jf_metadata,\n",
+    "    caldb_root=jf_cal.caldb_root)\n",
+    "\n",
     "\n",
-    "empty_constants = {\n",
-    "    \"Offset\": np.zeros((512, 1024, memory_cells, 3), dtype=np.float32),\n",
-    "    \"BadPixelsDark\": np.zeros((512, 1024, memory_cells, 3), dtype=np.uint32),\n",
-    "    \"RelativeGain\": None,\n",
-    "    \"BadPixelsFF\": None,\n",
-    "}\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# NOTE: this notebook will not overwrite calibration metadata file\n",
-    "const_yaml = metadata.get(\"retrieved-constants\", {})\n",
+    "# load constants arrays after storing fragment YAML file\n",
+    "# and validating constants availability.\n",
+    "const_data = jf_cal.ndarray_map(metadata=jf_metadata)\n",
     "\n",
-    "def get_constants_for_module(karabo_da: str):\n",
-    "    \"\"\" Get calibration constants for given module of Jungfrau\n",
+    "# For plotting\n",
+    "da_to_pdu = {}\n",
+    "for mod_info in jf_cal.physical_detector_units.values():\n",
+    "    da_to_pdu[mod_info[\"karabo_da\"]] = mod_info[\"physical_name\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def prepare_constants(module: str):\n",
+    "    \"\"\"Prepare constant arrays.\n",
     "\n",
+    "    :param module: The module name (karabo_da)\n",
     "    :return:\n",
     "        offset_map (offset map),\n",
     "        mask (mask of bad pixels),\n",
     "        gain_map (map of relative gain factors),\n",
-    "        db_module (name of DB module),\n",
-    "        when (dictionary: constant - creation time)\n",
+    "        module (name of module),\n",
     "    \"\"\"\n",
-    "\n",
-    "    when = dict()\n",
-    "    const_data = dict()\n",
-    "\n",
-    "    if const_yaml:\n",
-    "        for cname, mdata in const_yaml[karabo_da][\"constants\"].items():\n",
-    "            const_data[cname] = dict()\n",
-    "            when[cname] = mdata[\"creation-time\"]\n",
-    "            if when[cname]:\n",
-    "                with h5py.File(mdata[\"file-path\"], \"r\") as cf:\n",
-    "                    const_data[cname] = np.copy(\n",
-    "                        cf[f\"{mdata['dataset-name']}/data\"])\n",
-    "            else:\n",
-    "                const_data[cname] = empty_constants[cname]\n",
-    "    else:\n",
-    "        retrieval_function = partial(\n",
-    "            get_constant_from_db_and_time,\n",
-    "            karabo_id=karabo_id,\n",
-    "            karabo_da=karabo_da,\n",
-    "            cal_db_interface=cal_db_interface,\n",
-    "            creation_time=creation_time,\n",
-    "            timeout=cal_db_timeout,\n",
-    "            print_once=False,\n",
-    "        )\n",
-    "        \n",
-    "        for cname, cempty in empty_constants.items():\n",
-    "            const_data[cname], when[cname] = retrieval_function(\n",
-    "                condition=condition,\n",
-    "                constant=getattr(Constants.jungfrau, cname)(),\n",
-    "                empty_constant=cempty,\n",
-    "            )\n",
-    "\n",
-    "    offset_map = const_data[\"Offset\"]\n",
-    "    mask = const_data[\"BadPixelsDark\"]\n",
-    "    gain_map = const_data[\"RelativeGain\"]\n",
-    "    mask_ff = const_data[\"BadPixelsFF\"]\n",
+    "    constant_arrays = const_data[module]\n",
+    "    offset_map = constant_arrays[\"Offset10Hz\"]\n",
+    "    mask = constant_arrays[\"BadPixelsDark10Hz\"]\n",
+    " \n",
+    "    gain_map = constant_arrays.get(\"RelativeGain10Hz\")\n",
+    "    mask_ff = constant_arrays.get(\"BadPixelsFF10Hz\")\n",
     "\n",
     "    # Combine masks\n",
     "    if mask_ff is not None:\n",
@@ -312,7 +364,7 @@
     "    else:\n",
     "        offset_map = np.squeeze(offset_map)\n",
     "        mask = np.squeeze(mask)\n",
-    "    \n",
+    "\n",
     "    # masking double size pixels\n",
     "    mask[..., [255, 256], :, :] |= BadPixels.NON_STANDARD_SIZE\n",
     "    mask[..., [255, 256, 511, 512, 767, 768], :] |= BadPixels.NON_STANDARD_SIZE\n",
@@ -326,23 +378,71 @@
     "        else:\n",
     "            gain_map = np.moveaxis(np.squeeze(gain_map), 1, 0)\n",
     "\n",
-    "    return offset_map, mask, gain_map, karabo_da, when\n",
+    "    return offset_map, mask, gain_map, module\n",
     "\n",
     "with multiprocessing.Pool() as pool:\n",
-    "    r = pool.map(get_constants_for_module, karabo_da)\n",
+    "    r = pool.map(prepare_constants, karabo_da)\n",
     "\n",
     "# Print timestamps for the retrieved constants.\n",
     "constants = {}\n",
-    "for offset_map, mask, gain_map, k_da, when in r:\n",
-    "    print(f'Constants for module {k_da}:')\n",
-    "    for const in when:\n",
-    "        print(f'  {const} injected at {when[const]}')\n",
+    "for offset_map, mask, gain_map, k_da in r:\n",
     "\n",
-    "    if gain_map is None:\n",
-    "        print(\"No gain map found\")\n",
-    "        relative_gain = False\n",
+    "    constants[k_da] = (offset_map, mask, gain_map)\n",
     "\n",
-    "    constants[k_da] = (offset_map, mask, gain_map)"
+    "const_data.clear()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Read available sequence files to correct.\n",
+    "mapped_files, num_seq_files = map_seq_files(\n",
+    "    run_folder, karabo_da, sequences)\n",
+    "\n",
+    "if not len(mapped_files):\n",
+    "    raise IndexError(\n",
+    "        \"No sequence files available to correct for the selected sequences and karabo_da.\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(f\"Processing a total of {num_seq_files} sequence files\")\n",
+    "table = []\n",
+    "fi = 0\n",
+    "for kda, sfiles in mapped_files.items():\n",
+    "    for k, f in enumerate(sfiles):\n",
+    "        if k == 0:\n",
+    "            table.append((fi, kda, k, f))\n",
+    "        else:\n",
+    "            table.append((fi, \"\", k,  f))\n",
+    "        fi += 1\n",
+    "md = display(Latex(tabulate.tabulate(\n",
+    "    table, tablefmt='latex',\n",
+    "    headers=[\"#\", \"module\", \"# module\", \"file\"])))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if strixel_sensor:\n",
+    "    from cal_tools.jungfrau.jfstrixel import get_strixel_parameters, to_strixel\n",
+    "    strx_params = get_strixel_parameters(strixel_sensor)\n",
+    "\n",
+    "    strixel_shape = strx_params[\"frame_shape\"]\n",
+    "    Ydouble = strx_params.get(\"ydouble\", slice(None))\n",
+    "    Xdouble = strx_params.get(\"xdouble\", slice(None))\n",
+    "\n",
+    "    print('Strixel sensor transformation enabled')"
    ]
   },
   {
@@ -358,7 +458,7 @@
     "    \n",
     "    # Copy gain over first to keep it at the original 3 for low gain.\n",
     "    if strixel_sensor:\n",
-    "        to_strixel(g, out=gain_corr[index, ...])\n",
+    "        to_strixel(g, out=gain_corr[index, ...], kind=strixel_sensor)\n",
     "    else:\n",
     "        gain_corr[index, ...] = g\n",
     "\n",
@@ -366,6 +466,18 @@
     "    # Change low gain to 2 for indexing purposes.\n",
     "    g[g==3] = 2\n",
     "\n",
+    "    # A fix for a module hardware problem (e.g. Jungfrau_M302)\n",
+    "    # of chip being stuck on the wrong gain bit.\n",
+    "    if (\n",
+    "        wrong_gain_pixels[0] > -1 and\n",
+    "        wrong_gain_pixels[0] == int(local_karabo_da[-2:])\n",
+    "    ):\n",
+    "        x1 = wrong_gain_pixels[1]\n",
+    "        x2 = wrong_gain_pixels[2]\n",
+    "        y1 = wrong_gain_pixels[3]\n",
+    "        y2 = wrong_gain_pixels[4]\n",
+    "        g[:, x1:x2, y1:y2] = replace_wrong_gain_value\n",
+    "\n",
     "    # Select memory cells\n",
     "    if memory_cells > 1:\n",
     "        \"\"\"\n",
@@ -392,7 +504,7 @@
     "    d -= offset\n",
     "\n",
     "    # Gain correction\n",
-    "    if relative_gain:\n",
+    "    if relative_gain and gain_map is not None:\n",
     "        if memory_cells > 1:\n",
     "            gain_map_cell = gain_map[m, ...]\n",
     "        else:\n",
@@ -403,9 +515,9 @@
     "    msk = np.choose(g, np.moveaxis(mask_cell, -1, 0))\n",
     "\n",
     "    if strixel_sensor:\n",
-    "        to_strixel(d, out=data_corr[index, ...])\n",
+    "        to_strixel(d, out=data_corr[index, ...], kind=strixel_sensor)\n",
     "        data_corr[index, :, Ydouble, Xdouble] /= strixel_double_norm\n",
-    "        to_strixel(msk, out=mask_corr[index, ...])\n",
+    "        to_strixel(msk, out=mask_corr[index, ...], kind=strixel_sensor)\n",
     "    else:\n",
     "        data_corr[index, ...] = d\n",
     "        mask_corr[index, ...] = msk"
@@ -444,8 +556,12 @@
     "        roi_module, a1, a2, b1, b2, mean_axis = roi_definitions[i*6 : (i+1)*6]\n",
     "        if roi_module == module_no:\n",
     "            rois_defined += 1\n",
+    "            # Set pixels below the threshold to 0 (but still used in the averaging)\n",
+    "            roi_data = data_corr[..., a1:a2, b1:b2]\n",
+    "            if roi_threshold > -1:\n",
+    "                roi_data = roi_data * (roi_data > roi_threshold)\n",
     "            # Apply the mask and average remaining pixels to 1D\n",
-    "            roi_data = data_corr[..., a1:a2, b1:b2].mean(\n",
+    "            roi_data = roi_data.mean(\n",
     "                axis=mean_axis, where=(mask_corr[..., a1:a2, b1:b2] == 0)\n",
     "            )\n",
     "\n",
@@ -455,6 +571,7 @@
     "            # Add roi run control datasets.\n",
     "            ctrl_source.create_run_key(f'roi{rois_defined}.region', np.array([[a1, a2, b1, b2]]))\n",
     "            ctrl_source.create_run_key(f'roi{rois_defined}.reduce_axis', np.array([mean_axis]))\n",
+    "            ctrl_source.create_run_key(f'roi{rois_defined}.threshold', np.array([roi_threshold], dtype=np.float32))\n",
     "    \n",
     "    if rois_defined:\n",
     "        # Copy the index for the new source\n",
@@ -524,7 +641,7 @@
     "        \n",
     "        # Determine total output shape.\n",
     "        if strixel_sensor:\n",
-    "            oshape = (*ishape[:-2], *strixel_frame_shape)\n",
+    "            oshape = (*ishape[:-2], *strixel_shape)\n",
     "        else:\n",
     "            oshape = ishape\n",
     "\n",
@@ -601,7 +718,10 @@
     "            save_reduced_rois(outp_file, data_corr, mask_corr, local_karabo_da)\n",
     "\n",
     "            # Create METDATA datasets\n",
-    "            outp_file.create_metadata(like=seq_dc)\n",
+    "            outp_file.create_metadata(\n",
+    "                like=seq_dc,\n",
+    "                sequence=seq_dc.run_metadata()[\"sequenceNumber\"],\n",
+    "            )\n",
     "\n",
     "        step_timer.done_step(f'Saving data time.')\n",
     "if empty_seq == sum([len(i) for i in mapped_files.values()]):\n",
@@ -643,30 +763,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Positions are given in pixels\n",
-    "mod_width = (256 * 4) + (2 * 3)  # inc. 2px gaps between tiles\n",
-    "mod_height = (256 * 2) + 2\n",
-    "if karabo_id == \"SPB_IRDA_JF4M\":\n",
-    "    # The first 4 modules are rotated 180 degrees relative to the others.\n",
-    "    # We pass the bottom, beam-right corner of the module regardless of its\n",
-    "    # orientation, requiring a subtraction from the symmetric positions we'd\n",
-    "    # otherwise calculate.\n",
-    "    x_start, y_start = 1125, 1078\n",
-    "    module_pos = [\n",
-    "        (x_start - mod_width, y_start - mod_height - (i * (mod_height + 33)))\n",
-    "        for i in range(4)\n",
-    "    ] + [\n",
-    "        (-x_start, -y_start + (i * (mod_height + 33))) for i in range(4)\n",
-    "    ]\n",
-    "    orientations = [(-1, -1) for _ in range(4)] + [(1, 1) for _ in range(4)]\n",
-    "elif karabo_id == \"FXE_XAD_JF1M\":\n",
-    "    module_pos = ((-mod_width//2, 33),(-mod_width//2, -mod_height -33))\n",
-    "    orientations = [(-1,-1), (1,1)]\n",
-    "else:\n",
-    "    module_pos = ((-mod_width//2,-mod_height//2),)\n",
-    "    orientations = None\n",
-    "\n",
-    "geom = JUNGFRAUGeometry.from_module_positions(module_pos, orientations=orientations, asic_gap=0)"
+    "_, geom = init_jungfrau_geom(karabo_id=karabo_id, karabo_da=karabo_da)"
    ]
   },
   {
@@ -704,7 +801,7 @@
     "\n",
     "    # Reading RAW data for plotting.\n",
     "    jf_raw = components.JUNGFRAU(raw_dc, detector_name=karabo_id).select_trains(\n",
-    "        np.s_[:plot_trains]\n",
+    "            np.s_[:plot_trains]\n",
     "    )\n",
     "\n",
     "raw = jf_raw.get_array(\"data.adc\")[:, :, cell_idx_preview, ...].values\n",
@@ -720,22 +817,6 @@
     ")"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "db_modules = get_pdu_from_db(\n",
-    "    karabo_id=karabo_id,\n",
-    "    karabo_da=karabo_da,\n",
-    "    constant=Constants.jungfrau.Offset(),\n",
-    "    condition=condition,\n",
-    "    cal_db_interface=cal_db_interface,\n",
-    "    snapshot_at=creation_time,\n",
-    ")"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -789,6 +870,12 @@
     "    vmin=_corrected_vmin, vmax=_corrected_vmax, cmap=\"jet\"\n",
     ")\n",
     "\n",
+    "if strixel_sensor:\n",
+    "    if strixel_sensor == \"A1256\":\n",
+    "        aspect = 1/3\n",
+    "    else:  # A0123\n",
+    "        aspect = 10\n",
+    "\n",
     "if not strixel_sensor:\n",
     "    geom.plot_data_fast(\n",
     "        corrected_mean,\n",
@@ -797,8 +884,9 @@
     "        **mean_plot_kwargs\n",
     "    )\n",
     "else:\n",
-    "    ax.imshow(corrected_mean.squeeze(), aspect=10, **mean_plot_kwargs)\n",
-    "    \n",
+    "    corr = ax.imshow(corrected_mean.squeeze(), aspect=aspect, **mean_plot_kwargs)\n",
+    "    plt.colorbar(corr)\n",
+    "\n",
     "ax.set_title(f'{karabo_id} - Mean CORRECTED', size=18)\n",
     "\n",
     "plt.show()"
@@ -824,7 +912,8 @@
     "        **mean_plot_kwargs\n",
     "    )\n",
     "else:\n",
-    "    ax.imshow(corrected_mean.squeeze(), aspect=10, **mean_plot_kwargs)\n",
+    "    corr = ax.imshow(corrected_masked_mean.squeeze(), aspect=aspect, **mean_plot_kwargs)\n",
+    "    plt.colorbar(corr)\n",
     "\n",
     "ax.set_title(f'{karabo_id} - Mean CORRECTED with mask', size=18)\n",
     "\n",
@@ -855,7 +944,8 @@
     "        **single_plot_kwargs\n",
     "    )\n",
     "else:\n",
-    "    ax.imshow(corrected_train.squeeze(), aspect=10, **single_plot_kwargs)\n",
+    "    corr = ax.imshow(corrected_train.squeeze(), aspect=aspect, **single_plot_kwargs)\n",
+    "    plt.colorbar(corr)\n",
     "\n",
     "ax.set_title(f\"{karabo_id} - CORRECTED train: {tid}\", size=18)\n",
     "\n",
@@ -904,7 +994,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "for i, (pdu, mod) in enumerate(zip(db_modules, karabo_da)):\n",
+    "for i, mod in enumerate(karabo_da):\n",
+    "    pdu = da_to_pdu[mod]\n",
     "    h, ex, ey = np.histogram2d(\n",
     "        raw[i].flatten(),\n",
     "        gain[i].flatten(),\n",
@@ -933,7 +1024,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "for i, (pdu, mod) in enumerate(zip(db_modules, karabo_da)): \n",
+    "for i, mod in enumerate(karabo_da):\n",
+    "    pdu = da_to_pdu[mod]\n",
     "    fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(18, 10))\n",
     "    corrected_flatten = corrected[i].flatten()\n",
     "    for ax, hist_range in zip(axs, [(-100, 1000), (-1000, 10000)]):\n",
@@ -1023,7 +1115,9 @@
     "        colorbar={'shrink': 1, 'pad': 0.01},\n",
     "    )\n",
     "else:\n",
-    "    ax.imshow(np.log2(mask_train).squeeze(), vmin=0, vmax=32, cmap='jet', aspect=10)\n",
+    "    mask = ax.imshow(\n",
+    "        mask_train.squeeze(), vmin=0, vmax=32, cmap='jet', aspect=aspect)\n",
+    "    plt.colorbar(mask)\n",
     "\n",
     "plt.show()"
    ]
@@ -1031,9 +1125,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Offline Cal",
    "language": "python",
-   "name": "python3"
+   "name": "offline-cal"
   },
   "language_info": {
    "codemirror_mode": {
diff --git a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
index 144ce000930afe71d5a8c0cb1f6721d71ddacb50..93a402ca0885c6a1e1ae0677bc6cad12468a7fa8 100644
--- a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
@@ -34,7 +34,7 @@
     "\n",
     "# Parameters for calibration database and storing constants.\n",
     "use_dir_creation_date = True  # use dir creation date\n",
-    "cal_db_interface = 'tcp://max-exfl016:8016#8045'  # calibrate db interface to connect to\n",
+    "cal_db_interface = 'tcp://max-exfl-cal001:8016#8045'  # calibrate db interface to connect to\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "local_output = True  # output constants locally\n",
     "db_output = False  # output constants to database\n",
@@ -49,11 +49,11 @@
     "time_limits = 0.025  # to find calibration constants later on, the integration time is allowed to vary by 0.5 us\n",
     "\n",
     "# Parameters to be used for injecting dark calibration constants.\n",
-    "integration_time = 1000 # integration time in us, will be overwritten by value in file\n",
-    "gain_setting = 0  # 0 for dynamic, forceswitchg1, forceswitchg2, 1 for dynamichg0, fixgain1, fixgain2. Will be overwritten by value in file\n",
-    "gain_mode = 0  # 1 if medium and low runs are  fixgain1 and fixgain2, otherwise 0. It will be overwritten by value in file, if manual_slow_data\n",
-    "bias_voltage = 90  # sensor bias voltage in V, will be overwritten by value in file\n",
-    "memory_cells = 16  # number of memory cells\n",
+    "integration_time = -1  # Integration time in us. Set to -1 to overwrite by value in file.\n",
+    "gain_setting = -1  # 0 for dynamic, forceswitchg1, forceswitchg2, 1 for dynamichg0, fixgain1, fixgain2. Set to overwrite by value in file.\n",
+    "gain_mode = -1  # 1 if medium and low runs are  fixgain1 and fixgain2, otherwise 0. Set to -1 to overwrite by value in file.\n",
+    "bias_voltage = -1  # sensor bias voltage in V, will be overwritten by value in file\n",
+    "memory_cells = -1  # Number of memory cells.\n",
     "\n",
     "# Parameters used for plotting\n",
     "detailed_report = False\n",
@@ -90,7 +90,8 @@
     "\n",
     "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
     "from XFELDetAna.plotting.histogram import histPlot\n",
-    "from cal_tools import jungfraulib, step_timing\n",
+    "from cal_tools import step_timing\n",
+    "from cal_tools.jungfrau import jungfraulib\n",
     "from cal_tools.enums import BadPixels, JungfrauGainMode\n",
     "from cal_tools.tools import (\n",
     "    get_dir_creation_date,\n",
@@ -167,22 +168,39 @@
     "\n",
     "ctrl_src = ctrl_source_template.format(karabo_id_control)\n",
     "\n",
+    "run_nums = jungfraulib.sort_runs_by_gain(\n",
+    "    raw_folder=in_folder,\n",
+    "    runs=run_nums,\n",
+    "    ctrl_src=ctrl_src,\n",
+    "    )\n",
+    "_gain_mode = None\n",
     "for gain, run_n in enumerate(run_nums):\n",
     "    run_dc = RunDirectory(f\"{in_folder}/r{run_n:04d}/\")\n",
     "    gain_runs[run_n] = [gain, run_dc]\n",
     "    ctrl_data = jungfraulib.JungfrauCtrl(run_dc, ctrl_src)\n",
     "    # Read control data for the high gain run only.\n",
-    "    if run_n == run_high:\n",
+    "    if gain == 0:\n",
     "\n",
     "        run_mcells, sc_start = ctrl_data.get_memory_cells()\n",
     "\n",
-    "        if not manual_slow_data:\n",
+    "        if integration_time < 0:\n",
     "            integration_time = ctrl_data.get_integration_time()\n",
+    "            print(f\"Integration time is {integration_time} us.\")\n",
+    "        else:\n",
+    "            print(f\"Integration time is manually set to {integration_time} us.\")\n",
+    "\n",
+    "        if bias_voltage < 0:\n",
     "            bias_voltage = ctrl_data.get_bias_voltage()\n",
+    "            print(f\"Bias voltage is {bias_voltage} V.\")\n",
+    "        else:\n",
+    "            print(f\"Bias voltage is manually set to {bias_voltage} V.\")\n",
+    "\n",
+    "        if gain_setting < 0:\n",
     "            gain_setting = ctrl_data.get_gain_setting()\n",
     "            print(f\"Gain setting is {gain_setting} ({ctrl_data.run_settings})\")\n",
-    "            print(f\"Integration time is {integration_time} us\")\n",
-    "            print(f\"Bias voltage is {bias_voltage} V\")\n",
+    "        else:\n",
+    "            print(f\"Gain setting is manually set to {gain_setting}.\")\n",
+    "\n",
     "        if run_mcells == 1:\n",
     "            memory_cells = 1\n",
     "            print('Dark runs in single cell mode, '\n",
@@ -191,25 +209,17 @@
     "            memory_cells = 16\n",
     "            print('Dark runs in burst mode, '\n",
     "                  f'storage cell start: {sc_start:02d}')\n",
-    "    else:\n",
-    "        gain_mode = ctrl_data.get_gain_mode()\n",
+    "    else:  # medium and low gain\n",
+    "        _gain_mode = ctrl_data.get_gain_mode()\n",
     "        med_low_settings.append(ctrl_data.run_mode)\n",
     "\n",
-    "# A transperent workaround for old raw data with wrong/missing medium and low settings\n",
-    "if med_low_settings == [None, None]:\n",
-    "    warning(\"run.settings is not stored in the data to read. \"\n",
-    "            f\"Hence assuming gain_mode = {gain_mode} for adaptive old data.\")\n",
-    "elif med_low_settings == [\"dynamicgain\", \"forceswitchg1\"]:\n",
-    "    warning(f\"run.settings for medium and low gain runs are wrong {med_low_settings}. \"\n",
-    "            f\"This is an expected bug for old raw data. Setting gain_mode to {gain_mode}.\")\n",
-    "# Validate that low_med_settings is not a mix of adaptive and fixed settings.\n",
-    "elif not (sorted(med_low_settings) in [fixed_settings, dynamic_settings, old_fixed_settings]):  # noqa\n",
-    "    raise ValueError(\n",
-    "        \"Medium and low run settings are not as expected. \"\n",
-    "        f\"Either {dynamic_settings}, {fixed_settings}, or {old_fixed_settings} are expected.\\n\"\n",
-    "        f\"Got {sorted(med_low_settings)} for both runs, respectively.\")\n",
-    "\n",
-    "print(f\"Gain mode is {gain_mode} ({med_low_settings})\")\n",
+    "# TODO: consider updating this cell into something similar to agipdlib.AgipdCtrlsRuns()\n",
+    "if gain_mode < 0:\n",
+    "    gain_mode = _gain_mode\n",
+    "    print(f\"Gain mode is {gain_mode} ({med_low_settings})\")\n",
+    "else:\n",
+    "    print(f\"Gain mode is manually set to {gain_mode}.\")\n",
+    "\n",
     "\n",
     "step_timer.done_step(f'Reading control data.')"
    ]
@@ -220,6 +230,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "# set the operating condition\n",
     "condition = Conditions.Dark.jungfrau(\n",
     "    memory_cells=memory_cells,\n",
@@ -235,7 +246,8 @@
     "    constant=Constants.jungfrau.Offset(),\n",
     "    condition=condition,\n",
     "    cal_db_interface=cal_db_interface,\n",
-    "    snapshot_at=creation_time)"
+    "    snapshot_at=creation_time)\n",
+    "step_timer.done_step('Set conditions and get PDU names from CalCat.')"
    ]
   },
   {
@@ -245,6 +257,7 @@
    "outputs": [],
    "source": [
     "# Start retrieving existing constants for comparison\n",
+    "step_timer.start()\n",
     "mod_x_const = [(mod, const) for const in [\"Offset\", \"Noise\", \"BadPixelsDark\"] for mod in karabo_da]\n",
     "\n",
     "from cal_tools.tools import get_from_db\n",
@@ -285,7 +298,8 @@
     "old_retrieval_res = old_retrieval_pool.starmap_async(\n",
     "    retrieve_old_constant, mod_x_const\n",
     ")\n",
-    "old_retrieval_pool.close()"
+    "old_retrieval_pool.close()\n",
+    "step_timer.done_step('Retrieved old dark constants for comparison.')"
    ]
   },
   {
@@ -341,15 +355,23 @@
     "\n",
     "    print(f\"\\n- Instrument data path for {mod} is {instrument_src}.\")\n",
     "\n",
+    "    # (1024, 512, 1 or 16, 3)\n",
     "    offset_map[mod] = context.alloc(\n",
     "        shape=(sensor_size+(memory_cells, 3)), fill=0, dtype=np.float32)\n",
     "    noise_map[mod] = context.alloc(like=offset_map[mod], fill=0)\n",
-    "    bad_pixels_map[mod] = context.alloc(like=offset_map[mod], dtype=np.uint32, fill=0)\n",
+    "    bad_pixels_map[mod] = context.alloc(shape=offset_map[mod].shape, dtype=np.uint32, fill=0)\n",
     "\n",
     "    for run_n, [gain, run_dc] in gain_runs.items():\n",
     "\n",
     "        def process_cell(worker_id, array_index, cell_number):\n",
     "            cell_slice_idx = acelltable == cell_number\n",
+    "            if cell_slice_idx.sum() == 0:\n",
+    "                # This cell is not in the data (or it's deliberated excluded)\n",
+    "                bad_pixels_map[mod][..., cell_number, gain] = BadPixels.NO_DARK_DATA.value\n",
+    "                offset_map[mod][..., cell_number, gain] = np.nan\n",
+    "                noise_map[mod][..., cell_number, gain] = np.nan\n",
+    "                return\n",
+    "\n",
     "            thiscell = images[..., cell_slice_idx]  # [1024, 512, n_trains]\n",
     "\n",
     "            # Identify cells/trains with images of 0 pixels.\n",
@@ -363,6 +385,7 @@
     "            noise_map[mod][..., cell_number, gain] = np.std(  # [1024, 512]\n",
     "                thiscell, axis=2, dtype=np.float32)\n",
     "            del thiscell\n",
+    "\n",
     "            # Check if there are wrong bad gain values.\n",
     "            # 1. Exclude empty images.\n",
     "            # 2. Indicate pixels with wrong gain value for any train for each cell.\n",
@@ -372,8 +395,8 @@
     "                axis=2, dtype=np.float32\n",
     "            )\n",
     "\n",
-    "            # [1024, 512]\n",
-    "            bad_pixels_map[mod][..., cell_number, gain][gain_avg != raw_g] |= BadPixels.WRONG_GAIN_VALUE.value\n",
+    "            # Assign WRONG_GAIN_VALUE for a pixel in a badpixel map for all gains.\n",
+    "            bad_pixels_map[mod][:, :,cell_number][gain_avg != raw_g] |= BadPixels.WRONG_GAIN_VALUE.value\n",
     "\n",
     "        print(f\"Gain stage {gain}, run {run_n}\")\n",
     "\n",
@@ -419,10 +442,16 @@
     "\n",
     "        # Calculate offset and noise maps\n",
     "        context.map(process_cell, range(memory_cells))\n",
+    "        \n",
+    "        cells_missing = (bad_pixels_map[mod][0, 0, :, gain] & BadPixels.NO_DARK_DATA) > 0\n",
+    "        if np.any(cells_missing):\n",
+    "            print(f\"No dark data in gain stage {gain} found for cells\", np.nonzero(cells_missing)[0])\n",
+    "\n",
     "        del images\n",
     "        del acelltable\n",
     "        del gain_vals\n",
-    "    step_timer.done_step(f'Creating Offset and noise constants for a module.')"
+    "\n",
+    "    step_timer.done_step('Creating Offset and noise constants for a module.')"
    ]
   },
   {
@@ -501,7 +530,7 @@
     "                ax_n0.set_xlabel(\n",
     "                    f'RMS noise {g_name[g_idx]} ' + unit, fontsize=15)\n",
     "                plt.show()\n",
-    "    step_timer.done_step(f'Plotting offset and noise maps.')"
+    "    step_timer.done_step('Plotting offset and noise maps.')"
    ]
   },
   {
@@ -535,6 +564,7 @@
     "print_bp_entry(BadPixels.OFFSET_OUT_OF_THRESHOLD)\n",
     "print_bp_entry(BadPixels.NOISE_OUT_OF_THRESHOLD)\n",
     "print_bp_entry(BadPixels.OFFSET_NOISE_EVAL_ERROR)\n",
+    "print_bp_entry(BadPixels.NO_DARK_DATA)\n",
     "print_bp_entry(BadPixels.WRONG_GAIN_VALUE)\n",
     "\n",
     "def eval_bpidx(d):\n",
@@ -583,7 +613,7 @@
     "                    aspect=1.,\n",
     "                    vmin=0, vmax=5,\n",
     "                    title=f'G{g_idx} Bad pixel map - Cell {cell:02d} - Module {mod} ({pdu})')\n",
-    "step_timer.done_step(f'Creating bad pixels constant')"
+    "step_timer.done_step('Creating bad pixels constant')"
    ]
   },
   {
@@ -715,9 +745,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Offline Cal",
    "language": "python",
-   "name": "python3"
+   "name": "offline-cal"
   },
   "language_info": {
    "codemirror_mode": {
@@ -729,7 +759,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.12"
+   "version": "3.8.10"
   }
  },
  "nbformat": 4,
diff --git a/notebooks/Jungfrau/Jungfrau_darks_Summary_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_darks_Summary_NBC.ipynb
index 55269def401f498af7437e35c270f5a5bb70c413..6da1df8b914372e6bcfb7489438fd9cba056c0fd 100644
--- a/notebooks/Jungfrau/Jungfrau_darks_Summary_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_darks_Summary_NBC.ipynb
@@ -41,7 +41,6 @@
    "outputs": [],
    "source": [
     "import warnings\n",
-    "from collections import OrderedDict\n",
     "from pathlib import Path\n",
     "\n",
     "warnings.filterwarnings('ignore')\n",
@@ -57,8 +56,6 @@
     "matplotlib.use(\"agg\")\n",
     "%matplotlib inline\n",
     "\n",
-    "from cal_tools.enums import BadPixels, JungfrauSettings\n",
-    "from cal_tools.ana_tools import get_range\n",
     "from cal_tools.plotting import init_jungfrau_geom, show_processed_modules_jungfrau\n",
     "from cal_tools.tools import CalibrationMetadata\n",
     "from XFELDetAna.plotting.simpleplot import simplePlot"
diff --git a/notebooks/Jungfrau/Jungfrau_retrieve_constants_precorrection_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_retrieve_constants_precorrection_NBC.ipynb
deleted file mode 100644
index 50115ad431e642a2ad463d6d05e80da326a9ccbf..0000000000000000000000000000000000000000
--- a/notebooks/Jungfrau/Jungfrau_retrieve_constants_precorrection_NBC.ipynb
+++ /dev/null
@@ -1,269 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# JUNGFRAU Retrieving Constants Pre-correction #\n",
-    "\n",
-    "Author: European XFEL Detector Group, Version: 1.0\n",
-    "\n",
-    "Retrieving Required Constants for Offline Calibration of the JUNGFRAU Detector"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "in_folder = \"/gpfs/exfel/exp/SPB/202130/p900204/raw\"  # the folder to read data from, required\n",
-    "out_folder =  \"/gpfs/exfel/data/scratch/ahmedk/test/remove\"  # the folder to output to, required\n",
-    "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
-    "run = 112  # run to process, required\n",
-    "sequences = [-1]  # sequences to correct, set to [-1] for all, range allowed\n",
-    "sequences_per_node = 1  # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n",
-    "\n",
-    "# Parameters used to access raw data.\n",
-    "karabo_id = \"SPB_IRDA_JF4M\"  # karabo prefix of Jungfrau devices\n",
-    "karabo_da = ['JNGFR01', 'JNGFR02', 'JNGFR03', 'JNGFR04', 'JNGFR05', 'JNGFR06', 'JNGFR07', 'JNGFR08']  # data aggregators\n",
-    "receiver_template = \"JNGFR{:02d}\"  # Detector receiver template for accessing raw data files. e.g. \"JNGFR{:02d}\"\n",
-    "instrument_source_template = '{}/DET/{}:daqOutput'  # template for source name (filled with karabo_id & receiver_id). e.g. 'SPB_IRDA_JF4M/DET/JNGFR01:daqOutput'\n",
-    "ctrl_source_template = '{}/DET/CONTROL'  # template for control source name (filled with karabo_id_control)\n",
-    "karabo_id_control = \"\"  # if control is on a different ID, set to empty string if it is the same a karabo-id\n",
-    "\n",
-    "# Parameters for calibration database.\n",
-    "use_dir_creation_date = True  # use the creation data of the input dir for database queries\n",
-    "cal_db_interface = \"tcp://max-exfl016:8017#8025\" # the database interface to use\n",
-    "cal_db_timeout = 180000  # timeout on cal db requests\n",
-    "\n",
-    "# Parameters affecting corrected data.\n",
-    "relative_gain = True  # do relative gain correction\n",
-    "\n",
-    "# Parameters for retrieving calibration constants\n",
-    "manual_slow_data = False  # if true, use manually entered bias_voltage, integration_time, gain_setting, and gain_mode values\n",
-    "integration_time = 4.96  # integration time in us, will be overwritten by value in file\n",
-    "gain_setting = 0  # 0 for dynamic gain, 1 for dynamic HG0, will be overwritten by value in file\n",
-    "gain_mode = 0  # 0 for runs with dynamic gain setting, 1 for fixgain. It will be overwritten by value in file, if manual_slow_data is set to True.\n",
-    "mem_cells = -1  # Set mem_cells to -1 to automatically use the value stored in RAW data.#\n",
-    "bias_voltage = 180  # will be overwritten by value in file"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import datetime\n",
-    "from functools import partial\n",
-    "\n",
-    "import multiprocessing\n",
-    "from extra_data import RunDirectory\n",
-    "from pathlib import Path\n",
-    "\n",
-    "from cal_tools.jungfraulib import JungfrauCtrl\n",
-    "from cal_tools.tools import (\n",
-    "    get_dir_creation_date,\n",
-    "    get_from_db,\n",
-    "    CalibrationMetadata,\n",
-    ")\n",
-    "from iCalibrationDB import Conditions, Constants"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "in_folder = Path(in_folder)\n",
-    "out_folder = Path(out_folder)\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "run_folder = in_folder / f'r{run:04d}'\n",
-    "run_dc = RunDirectory(run_folder)\n",
-    "\n",
-    "out_folder.mkdir(parents=True, exist_ok=True)\n",
-    "\n",
-    "creation_time = None\n",
-    "if use_dir_creation_date:\n",
-    "    creation_time = get_dir_creation_date(in_folder, run)\n",
-    "    print(f\"Using {creation_time} as creation time\")\n",
-    "\n",
-    "if karabo_id_control == \"\":\n",
-    "    karabo_id_control = karabo_id"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ctrl_src = ctrl_source_template.format(karabo_id_control)\n",
-    "ctrl_data = JungfrauCtrl(run_dc, ctrl_src)\n",
-    "\n",
-    "if mem_cells < 0:\n",
-    "    memory_cells, sc_start = ctrl_data.get_memory_cells()\n",
-    "\n",
-    "    mem_cells_name = \"single cell\" if memory_cells == 1 else \"burst\"\n",
-    "    print(f\"Run is in {mem_cells_name} mode.\\nStorage cell start: {sc_start:02d}\")\n",
-    "else:\n",
-    "    memory_cells = mem_cells\n",
-    "    mem_cells_name = \"single cell\" if memory_cells == 1 else \"burst\"\n",
-    "    print(f\"Run is in manually set to {mem_cells_name} mode. With {memory_cells} memory cells\")\n",
-    "\n",
-    "if not manual_slow_data:\n",
-    "    integration_time = ctrl_data.get_integration_time()\n",
-    "    bias_voltage = ctrl_data.get_bias_voltage()\n",
-    "    gain_setting = ctrl_data.get_gain_setting()\n",
-    "    gain_mode = ctrl_data.get_gain_mode()\n",
-    "\n",
-    "print(f\"Integration time is {integration_time} us\")\n",
-    "print(f\"Gain setting is {gain_setting} (run settings: {ctrl_data.run_settings}\")\n",
-    "print(f\"Gain mode is {gain_mode} ({ctrl_data.run_mode})\")\n",
-    "print(f\"Bias voltage is {bias_voltage} V\")\n",
-    "print(f\"Number of memory cells are {memory_cells}\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "condition = Conditions.Dark.jungfrau(\n",
-    "    memory_cells=memory_cells,\n",
-    "    bias_voltage=bias_voltage,\n",
-    "    integration_time=integration_time,\n",
-    "    gain_setting=gain_setting,\n",
-    "    gain_mode=gain_mode,\n",
-    ")\n",
-    "\n",
-    "def get_constants_for_module(mod: str):\n",
-    "    \"\"\"Get calibration constants for given module for Jungfrau.\"\"\"\n",
-    "    retrieval_function = partial(\n",
-    "        get_from_db,\n",
-    "        karabo_id=karabo_id,\n",
-    "        karabo_da=mod,\n",
-    "        cal_db_interface=cal_db_interface,\n",
-    "        creation_time=creation_time,\n",
-    "        timeout=cal_db_timeout,\n",
-    "        verbosity=0,\n",
-    "        meta_only=True,\n",
-    "        load_data=False,\n",
-    "        empty_constant=None\n",
-    "    )\n",
-    "\n",
-    "    mdata_dict = dict()\n",
-    "    mdata_dict[\"constants\"] = dict()\n",
-    "    constants = [\n",
-    "        \"Offset\", \"BadPixelsDark\",\n",
-    "        \"RelativeGain\", \"BadPixelsFF\",\n",
-    "    ]\n",
-    "    for cname in constants:\n",
-    "        mdata_dict[\"constants\"][cname] = dict()\n",
-    "        if not relative_gain and cname in [\"BadPixelsFF\", \"RelativeGain\"]:\n",
-    "            continue\n",
-    "        _, mdata = retrieval_function(\n",
-    "            condition=condition,\n",
-    "            constant=getattr(Constants.jungfrau, cname)(),\n",
-    "        )\n",
-    "        mdata_const = mdata.calibration_constant_version\n",
-    "        const_mdata = mdata_dict[\"constants\"][cname]\n",
-    "        # check if constant was successfully retrieved.\n",
-    "        if mdata.comm_db_success:\n",
-    "            const_mdata[\"file-path\"] = (\n",
-    "                f\"{mdata_const.hdf5path}\" f\"{mdata_const.filename}\"\n",
-    "            )\n",
-    "            const_mdata[\"dataset-name\"] = mdata_const.h5path\n",
-    "            const_mdata[\"creation-time\"] = f\"{mdata_const.begin_at}\"\n",
-    "            mdata_dict[\"physical-detector-unit\"] = mdata_const.device_name\n",
-    "        else:\n",
-    "            const_mdata[\"file-path\"] = None\n",
-    "            const_mdata[\"creation-time\"] = None\n",
-    "    return mdata_dict, mod"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Constant paths are saved under retrieved-constants in calibration_metadata.yml\n",
-    "retrieved_constants = metadata.setdefault(\"retrieved-constants\", {})\n",
-    "# Avoid retrieving constants for available modules in calibration_metadata.yml\n",
-    "# This is used during reproducability.\n",
-    "query_karabo_da = []\n",
-    "for mod in karabo_da:\n",
-    "    if mod in retrieved_constants.keys():\n",
-    "        print(f\"Constant for {mod} already in \"\n",
-    "              \"calibration_metadata.yml, won't query again.\")\n",
-    "        continue\n",
-    "    query_karabo_da.append(mod)\n",
-    "\n",
-    "with multiprocessing.Pool() as pool:\n",
-    "    results = pool.map(get_constants_for_module, query_karabo_da)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "timestamps = dict()\n",
-    "for md_dict, mod in results:\n",
-    "    retrieved_constants[mod] = md_dict\n",
-    "\n",
-    "    module_timestamps = timestamps[mod] = dict()\n",
-    "    module_constants = retrieved_constants[mod]\n",
-    "\n",
-    "    print(f\"Module: {mod}:\")\n",
-    "    for cname, mdata in module_constants[\"constants\"].items():\n",
-    "        if hasattr(mdata[\"creation-time\"], 'strftime'):\n",
-    "            mdata[\"creation-time\"] = mdata[\"creation-time\"].strftime('%y-%m-%d %H:%M')\n",
-    "        print(f'{cname:.<12s}', mdata[\"creation-time\"])\n",
-    "\n",
-    "    for cname in [\"Offset\", \"BadPixelsDark\", \"RelativeGain\", \"BadPixelsFF\"]:\n",
-    "        if cname in module_constants[\"constants\"]:\n",
-    "            module_timestamps[cname] = module_constants[\"constants\"][cname][\"creation-time\"]\n",
-    "        else:\n",
-    "            module_timestamps[cname] = \"NA\"\n",
-    "\n",
-    "time_summary = retrieved_constants.setdefault(\"time-summary\", {})\n",
-    "time_summary = timestamps\n",
-    "\n",
-    "metadata.save()"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.8.11 ('.cal4_venv')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.11"
-  },
-  "orig_nbformat": 4,
-  "vscode": {
-   "interpreter": {
-    "hash": "ccde353e8822f411c1c49844e1cbe3edf63293a69efd975d1b44f5e852832668"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/LPD/Characterize_LPD_GAIN_CI_per_pixel_NBC.ipynb b/notebooks/LPD/Characterize_LPD_GAIN_CI_per_pixel_NBC.ipynb
index 0cdc02eb4d01c4375a0156c4ca168c9d9a390cdd..d406668eae5a4e79685333a43f8ce7bd61958b54 100644
--- a/notebooks/LPD/Characterize_LPD_GAIN_CI_per_pixel_NBC.ipynb
+++ b/notebooks/LPD/Characterize_LPD_GAIN_CI_per_pixel_NBC.ipynb
@@ -40,7 +40,7 @@
     "db_output = False # output constants to database\n",
     "db_input = True\n",
     "bias_voltage = 300 # detector bias voltage\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015\" # the database interface to use\n",
     "\n",
     "use_dir_creation_date = True # use the creation date of the directory for database time derivation\n",
     "instrument = \"FXE\"\n",
diff --git a/notebooks/LPD/Inject_calibration_constants_from_h5files.ipynb b/notebooks/LPD/Inject_calibration_constants_from_h5files.ipynb
index 463bf868cf6884a092f72407d700864ef8dbb4cf..e3da5863595eb1de5a0f19dcf08c35a96fb70705 100644
--- a/notebooks/LPD/Inject_calibration_constants_from_h5files.ipynb
+++ b/notebooks/LPD/Inject_calibration_constants_from_h5files.ipynb
@@ -29,7 +29,7 @@
     "karabo_da = [\"all\"]  # karabo data aggregators. default \"all\" for all 16 karabo data aggregator names.\n",
     "\n",
     "# calibration database parameters:\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8045\"  # calibration DB zmq address.\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8045\"  # calibration DB zmq address.\n",
     "\n",
     "# calibration constant conditions:\n",
     "memory_cells = 512  # Number of memory cells. Used for constant conditions.\n",
diff --git a/notebooks/LPD/LPDChar_Darks_NBC.ipynb b/notebooks/LPD/LPDChar_Darks_NBC.ipynb
index c9508aa5c5d43a540dec87f08045217e229a7ed0..de76f651d6b3f713b3e96102a8dd793eb850a3d9 100644
--- a/notebooks/LPD/LPDChar_Darks_NBC.ipynb
+++ b/notebooks/LPD/LPDChar_Darks_NBC.ipynb
@@ -22,24 +22,20 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/FXE/202030/p900121/raw\" # path to input data, required\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/LPD/\" # path to output to, required\n",
+    "in_folder = \"/gpfs/exfel/exp/FXE/202304/p003338/raw\" # path to input data, required\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/kluyvert/lpd-darks-p3338-r133-134-135/\" # path to output to, required\n",
     "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
-    "sequence = 0 # sequence files to evaluate\n",
     "modules = [-1] # list of modules to evaluate, RANGE ALLOWED\n",
-    "run_high = 120 # run number in which high gain data was recorded, required\n",
-    "run_med = 121 # run number in which medium gain data was recorded, required\n",
-    "run_low = 122 # run number in which low gain data was recorded, required\n",
+    "run_high = 133 # run number in which high gain data was recorded, required\n",
+    "run_med = 134 # run number in which medium gain data was recorded, required\n",
+    "run_low = 135 # run number in which low gain data was recorded, required\n",
     "\n",
     "karabo_id = \"FXE_DET_LPD1M-1\" # karabo karabo_id\n",
     "karabo_da = ['-1']  # a list of data aggregators names, Default [-1] for selecting all data aggregators\n",
-    "receiver_id = \"{}CH0\" # inset for receiver devices\n",
-    "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data\n",
-    "h5path = '/INSTRUMENT/{}/DET/{}:xtdf/image' # path in the HDF5 file to images\n",
-    "h5path_idx = '/INDEX/{}/DET/{}:xtdf/image' # path in the HDF5 file to images\n",
+    "source_name = \"{}/DET/{}CH0:xtdf\"  # Source name for raw detector data - filled with karabo_id & module number\n",
     "\n",
     "use_dir_creation_date = True # use the creation date of the directory for database time derivation\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8025\" # the database interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\"\n",
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
@@ -51,16 +47,17 @@
     "thresholds_offset_hard = [400, 1500] # bad pixel hard threshold\n",
     "thresholds_noise_sigma = 7. # bad pixel relative threshold in terms of n sigma noise\n",
     "thresholds_noise_hard = [1, 35] # bad pixel hard threshold\n",
-    "skip_first_ntrains = 10 # Number of first trains to skip\n",
+    "\n",
+    "ntrains = 500  # maximum number of trains to use in each gain stage\n",
+    "skip_first_ntrains = 10  # Number of first trains to skip\n",
+    "min_trains = 370  # minimum number of trains needed for each gain stage\n",
     "\n",
     "# Parameters for plotting\n",
     "skip_plots = False  # exit after writing corrected files\n",
     "\n",
-    "instrument = \"FXE\" # instrument name\n",
-    "ntrains = 100 # number of trains to use\n",
     "high_res_badpix_3d = False # plot bad-pixel summary in high resolution\n",
     "test_for_normality = False # permorm normality test\n",
-    "inject_cell_order = False  # Include memory cell order as part of the detector condition\n",
+    "inject_cell_order = 'auto'  # Include memory cell order as part of the detector condition: auto/always/never\n",
     "operation_mode = ''  # Detector operation mode, optional"
    ]
   },
@@ -72,17 +69,15 @@
    "source": [
     "import copy\n",
     "import multiprocessing\n",
-    "import os\n",
     "import warnings\n",
-    "from collections import OrderedDict\n",
     "from datetime import datetime\n",
     "from functools import partial\n",
     "from logging import warning\n",
+    "from pathlib import Path\n",
     "\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
     "import dateutil.parser\n",
-    "import h5py\n",
     "import matplotlib\n",
     "import pasha as psh\n",
     "import scipy.stats\n",
@@ -99,8 +94,10 @@
     "from iCalibrationDB import Conditions, Constants, Detectors, Versions\n",
     "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
     "from XFELDetAna.plotting.simpleplot import simplePlot\n",
+    "from extra_data import RunDirectory\n",
     "\n",
     "from cal_tools.enums import BadPixels\n",
+    "from cal_tools.lpdlib import make_cell_order_condition\n",
     "from cal_tools.plotting import (\n",
     "    create_constant_overview,\n",
     "    plot_badpix_3d,\n",
@@ -117,6 +114,7 @@
     "    map_gain_stages,\n",
     "    module_index_to_qm,\n",
     "    parse_runs,\n",
+    "    reorder_axes,\n",
     "    run_prop_seq_from_path,\n",
     "    save_const_to_h5,\n",
     "    send_to_db,\n",
@@ -141,27 +139,16 @@
     "else:\n",
     "    modules = [int(x[-2:]) for x in karabo_da]\n",
     "\n",
-    "gain_runs = OrderedDict()\n",
-    "if capacitor_setting == 5:\n",
-    "    gain_runs[\"high_5pf\"] = run_high\n",
-    "    gain_runs[\"med_5pf\"] =  run_med\n",
-    "    gain_runs[\"low_5pf\"] =  run_low\n",
-    "elif capacitor_setting == 50:\n",
-    "    gain_runs[\"high_50pf\"] = run_high\n",
-    "    gain_runs[\"med_50pf\"] =  run_med\n",
-    "    gain_runs[\"low_50pf\"] =  run_low\n",
-    "\n",
-    "capacitor_settings = [capacitor_setting]\n",
-    "capacitor_settings = ['{}pf'.format(c) for c in capacitor_settings]\n",
-    "\n",
-    "h5path = h5path.format(karabo_id, receiver_id)\n",
-    "h5path_idx = h5path_idx.format(karabo_id, receiver_id)\n",
+    "capacitor_setting_s = f'{capacitor_setting}pf'\n",
     "\n",
     "creation_time = None\n",
     "if use_dir_creation_date:\n",
     "    creation_time = get_dir_creation_date(in_folder, run_high)\n",
     "    print(\"Using {} as creation time\".format(creation_time))\n",
     "\n",
+    "if inject_cell_order not in {'auto', 'always', 'never'}:\n",
+    "    raise ValueError(\"inject_cell_order must be auto/always/never\")\n",
+    "\n",
     "run, prop, seq = run_prop_seq_from_path(in_folder)\n",
     "\n",
     "cal_db_interface = get_random_db_interface(cal_db_interface)\n",
@@ -171,23 +158,11 @@
     "print(\"Proposal: {}\".format(prop))\n",
     "print(\"Memory cells: {}/{}\".format(mem_cells, max_cells))\n",
     "print(\"Runs: {}, {}, {}\".format(run_high, run_med, run_low))\n",
-    "print(\"Sequence: {}\".format(sequence))\n",
     "print(\"Using DB: {}\".format(db_output))\n",
     "print(\"Input: {}\".format(in_folder))\n",
     "print(\"Output: {}\".format(out_folder))\n",
-    "print(\"Bias voltage: {}V\".format(bias_voltage))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# set everything up filewise\n",
-    "gmf = map_gain_stages(in_folder, gain_runs, path_template, karabo_da, [sequence])\n",
-    "gain_mapped_files, total_sequences, total_file_size = gmf\n",
-    "print(f\"Will process a total of {total_sequences} files.\")"
+    "print(\"Bias voltage: {}V\".format(bias_voltage))\n",
+    "print(f\"Capacitor setting: {capacitor_setting_s}\")"
    ]
   },
   {
@@ -207,41 +182,39 @@
     "parallel_num_threads = multiprocessing.cpu_count() // parallel_num_procs\n",
     "\n",
     "# the actual characterization\n",
-    "def characterize_module(filename, channel, gg, cap):\n",
-    "\n",
-    "    def splitOffGainLPD(d):\n",
-    "        msk = np.zeros(d.shape, np.uint16)\n",
-    "        msk[...] = 0b0000111111111111\n",
-    "        data = np.bitwise_and(d, msk)\n",
-    "        msk[...] = 0b0011000000000000\n",
-    "        gain = np.bitwise_and(d, msk)//4096\n",
-    "        gain[gain > 2] = 2\n",
-    "        return data, gain\n",
-    "\n",
-    "    infile = h5py.File(filename, \"r\")\n",
+    "def characterize_module(run_path, channel, gg):\n",
+    "    run = RunDirectory(run_path, parallelize=False)\n",
+    "    det_source = source_name.format(karabo_id, channel)\n",
+    "    data = run[det_source, 'image.data'].drop_empty_trains()\n",
+    "    data = data[skip_first_ntrains : skip_first_ntrains + ntrains]\n",
+    "    cell_ids = run[det_source, 'image.cellId'].drop_empty_trains()\n",
+    "    cell_ids = cell_ids[skip_first_ntrains : skip_first_ntrains + ntrains]\n",
+    "\n",
+    "    # If there is no data available, return and expect this\n",
+    "    # module to be skipped later.\n",
+    "    if len(data.train_ids) == 0:\n",
+    "        return None, None, None, None, None, None, None, None\n",
+    "    elif len(data.train_ids) < min_trains:\n",
+    "        raise Exception(f\"Run {run_path} only contains {len(data.train_ids)} trains, but {min_trains} required\")\n",
+    "\n",
+    "    im = data.ndarray()\n",
+    "    if im.ndim > 3:\n",
+    "        im = im[:, 0]  # Drop extra dimension\n",
     "    \n",
-    "    instrument_src = h5path.format(channel)\n",
-    "    index_src = h5path_idx.format(channel)\n",
-    "    count = infile[f\"{index_src}/count\"][()]\n",
-    "    first = infile[f\"{index_src}/first\"][()]\n",
-    "    valid = count != 0\n",
-    "    count, first = count[valid], first[valid]\n",
-    "    first_image = int(first[skip_first_ntrains] if first.shape[0] > skip_first_ntrains else 0)\n",
-    "    last_image = int(first_image + np.sum(count[skip_first_ntrains:skip_first_ntrains+ntrains]))\n",
-    "\n",
-    "    im = np.array(infile[\"{}/data\".format(instrument_src, channel)][first_image:last_image, ...])\n",
-    "    cellid = np.squeeze(np.array(infile[\"{}/cellId\".format(instrument_src, channel)][first_image:last_image, ...]))\n",
-    "    infile.close()\n",
-    "    if im.shape[0] == 0:  # No data\n",
-    "        return None, None, channel, gg, cap, None, None, None, None\n",
-    "\n",
-    "    cellid_pattern = cellid[:count[0]]\n",
-    "\n",
-    "    im, g = splitOffGainLPD(im[:, 0, ...])\n",
-    "    im = im.astype(np.float32)\n",
+    "    cellid = cell_ids.ndarray()\n",
+    "    cellid_pattern = cell_ids[0].ndarray()\n",
+    "    if cellid.ndim > 1:\n",
+    "        cellid = cellid[:, 0]\n",
+    "        cellid_pattern = cellid_pattern[:, 0]\n",
     "\n",
-    "    im = np.rollaxis(im, 2)\n",
-    "    im = np.rollaxis(im, 2, 1)\n",
+    "    # Mask off gain bits, leaving only data\n",
+    "    im &= 0b0000111111111111\n",
+    "\n",
+    "    im = im.astype(np.float32)\n",
+    "    im = reorder_axes(im,\n",
+    "        from_order=('frames', 'slow_scan', 'fast_scan'),\n",
+    "        to_order=('fast_scan', 'slow_scan', 'frames'),\n",
+    "    )\n",
     "\n",
     "    context = psh.context.ThreadContext(num_workers=parallel_num_threads)\n",
     "    offset = context.alloc(shape=(im.shape[0], im.shape[1], max_cells), dtype=np.float64)\n",
@@ -281,80 +254,63 @@
     "    bp[~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
     "\n",
     "    idx = (cellid == cellid[0])\n",
-    "    return offset, noise, channel, gg, cap, bp, im[12, 12, idx], normal_test, cellid_pattern"
+    "    return offset, noise, channel, gg, bp, im[12, 12, idx], normal_test, cellid_pattern"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "scrolled": false
+   },
    "outputs": [],
    "source": [
-    "offset_g = OrderedDict()\n",
-    "noise_g = OrderedDict()\n",
-    "badpix_g = OrderedDict()\n",
-    "data_g = OrderedDict()\n",
-    "ntest_g = OrderedDict()\n",
+    "offset_g = {}\n",
+    "noise_g = {}\n",
+    "badpix_g = {}\n",
+    "data_g = {}\n",
+    "ntest_g = {}\n",
     "# Should be the same cell order for all modules & all gain stages\n",
     "cellid_patterns_g = {}\n",
     "\n",
-    "gg = 0\n",
-    "old_cap = None\n",
-    "start = datetime.now()\n",
-    "inp = []\n",
-    "    \n",
-    "for gain, mapped_files in gain_mapped_files.items():\n",
-    "    cap = gain.split(\"_\")[1]\n",
-    "    if cap != old_cap:\n",
-    "        gg = 0\n",
-    "        old_cap = cap\n",
-    "        offset_g[cap] = OrderedDict()\n",
-    "        noise_g[cap] = OrderedDict()\n",
-    "        badpix_g[cap] = OrderedDict()\n",
-    "        data_g[cap] = OrderedDict()\n",
-    "        ntest_g[cap] = OrderedDict()\n",
-    "        cellid_patterns_g[cap] = {}\n",
     "\n",
+    "inp = []\n",
+    "for gg, run_num in enumerate([run_high, run_med, run_low]):\n",
+    "    run_path = Path(in_folder, f\"r{run_num:04d}\")\n",
     "    for i in modules:\n",
-    "        qm = module_index_to_qm(i)\n",
-    "        if qm in mapped_files and not mapped_files[qm].empty():\n",
-    "            fname_in = mapped_files[qm].get()\n",
-    "            print(\"Process file: \", fname_in)\n",
-    "            inp.append((fname_in, i, gg, cap))\n",
-    "\n",
-    "    gg+=1\n",
+    "        inp.append((run_path, i, gg))\n",
     "\n",
     "with multiprocessing.Pool(processes=parallel_num_procs) as pool:\n",
     "    results = pool.starmap(characterize_module, inp)\n",
     "\n",
     "for ir, r in enumerate(results):\n",
-    "    offset, noise, i, gg, cap, bp, data, normal, cellid_pattern = r\n",
+    "    offset, noise, i, gg, bp, data, normal, cellid_pattern = r\n",
     "    if data is None:\n",
     "        warning(f\"No data for module {i} of gain {gg}\")\n",
     "        skip_plots = True\n",
     "        continue\n",
     "    qm = module_index_to_qm(i)\n",
-    "    if qm not in offset_g[cap]:\n",
-    "        offset_g[cap][qm] = np.zeros(\n",
-    "            (offset.shape[0], offset.shape[1], offset.shape[2], 3))\n",
-    "        noise_g[cap][qm] = np.zeros_like(offset_g[cap][qm])\n",
-    "        badpix_g[cap][qm] = np.zeros_like(offset_g[cap][qm], dtype=np.uint32)\n",
-    "        data_g[cap][qm] = np.full((ntrains, 3), np.nan)\n",
-    "        ntest_g[cap][qm] = np.zeros_like(offset_g[cap][qm])\n",
-    "        cellid_patterns_g[cap][qm] = cellid_pattern\n",
+    "    if qm not in offset_g:\n",
+    "        offset_g[qm] = np.zeros(offset.shape[:3] + (3,))\n",
+    "        print(\"Constant shape:\", offset_g[qm].shape)\n",
+    "        noise_g[qm] = np.zeros_like(offset_g[qm])\n",
+    "        badpix_g[qm] = np.zeros_like(offset_g[qm], dtype=np.uint32)\n",
+    "        data_g[qm] = np.full((ntrains, 3), np.nan)\n",
+    "        ntest_g[qm] = np.zeros_like(offset_g[qm])\n",
+    "        cellid_patterns_g[qm] = cellid_pattern\n",
     "    else:\n",
-    "        if not np.array_equal(cellid_pattern, cellid_patterns_g[cap][qm]):\n",
+    "        if not np.array_equal(cellid_pattern, cellid_patterns_g[qm]):\n",
     "            raise ValueError(\"Inconsistent cell ID pattern between gain stages\")\n",
     "            \n",
     "\n",
-    "    offset_g[cap][qm][..., gg] = offset\n",
-    "    noise_g[cap][qm][..., gg] = noise\n",
-    "    badpix_g[cap][qm][..., gg] = bp\n",
-    "    data_g[cap][qm][:data.shape[0], gg] = data\n",
-    "    ntest_g[cap][qm][..., gg] = normal\n",
+    "    offset_g[qm][..., gg] = offset\n",
+    "    noise_g[qm][..., gg] = noise\n",
+    "    badpix_g[qm][..., gg] = bp\n",
+    "    data_g[qm][:data.shape[0], gg] = data\n",
+    "    ntest_g[qm][..., gg] = normal\n",
     "\n",
     "    hn, cn = np.histogram(data, bins=20)\n",
-    "    print(f\"{gain_names[gg]} gain, Capacitor {cap}, Module: {qm}. \"\n",
+    "    print(f\"{gain_names[gg]} gain, Module: {qm}. \"\n",
     "          f\"Number of processed trains per cell: {data.shape[0]}.\")"
    ]
   },
@@ -391,7 +347,7 @@
    "source": [
     "# TODO: add db_module when received from myMDC\n",
     "# Create the modules dict of karabo_das and PDUs\n",
-    "qm_dict = OrderedDict()\n",
+    "qm_dict = {}\n",
     "for i, k_da in zip(modules, karabo_da):\n",
     "    qm = module_index_to_qm(i)\n",
     "    qm_dict[qm] = {\"karabo_da\": k_da,\n",
@@ -406,76 +362,71 @@
    "source": [
     "# Retrieve existing constants for comparison\n",
     "clist = [\"Offset\", \"Noise\", \"BadPixelsDark\"]\n",
-    "old_const = {}\n",
-    "old_mdata = {}\n",
     "\n",
     "dinstance = \"LPD1M1\"\n",
     "detinst = getattr(Detectors, dinstance)\n",
     "print('Retrieve pre-existing constants for comparison.')\n",
-    "for cap in capacitor_settings:\n",
-    "    old_const[cap] = {}\n",
-    "    old_mdata[cap] = {}\n",
-    "    for qm in offset_g[cap].keys():\n",
-    "        old_const[cap][qm] = {}\n",
-    "        old_mdata[cap][qm] = {}\n",
-    "        qm_db = qm_dict[qm]\n",
-    "        karabo_da = qm_db[\"karabo_da\"]\n",
-    "        cellid_pattern = cellid_patterns_g[cap][qm]\n",
-    "        if inject_cell_order:\n",
-    "            mem_cell_order = \",\".join([str(c) for c in cellid_pattern]) + \",\"\n",
-    "        else:\n",
-    "            mem_cell_order = None\n",
     "\n",
-    "        condition = Conditions.Dark.LPD(memory_cells=max_cells,\n",
-    "                                        bias_voltage=bias_voltage,\n",
-    "                                        capacitor=cap,\n",
-    "                                        memory_cell_order=mem_cell_order,\n",
-    "                                       )\n",
-    "        for const in clist:\n",
-    "            constant = getattr(Constants.LPD, const)()\n",
-    "            if not qm_db[\"db_module\"]:\n",
-    "                # This should be used in case of running notebook\n",
-    "                # by a different method other than myMDC which already\n",
-    "                # sends CalCat info.\n",
-    "                qm_db[\"db_module\"] = get_pdu_from_db(karabo_id, [karabo_da], constant,\n",
-    "                                                     condition, cal_db_interface,\n",
-    "                                                     snapshot_at=creation_time)[0]\n",
-    "\n",
-    "            data, mdata = get_from_db(karabo_id, karabo_da,\n",
-    "                                      constant,\n",
-    "                                      condition, None,\n",
-    "                                      cal_db_interface,\n",
-    "                                      creation_time=creation_time,\n",
-    "                                      verbosity=2, timeout=cal_db_timeout)\n",
-    "\n",
-    "            old_const[cap][qm][const] = data\n",
-    "\n",
-    "            if mdata is None or data is None:\n",
-    "                old_mdata[cap][qm][const] = {\n",
-    "                    \"timestamp\": \"Not found\",\n",
-    "                    \"filepath\": None,\n",
-    "                    \"h5path\": None\n",
-    "                }\n",
-    "            else:\n",
-    "                timestamp = mdata.calibration_constant_version.begin_at.isoformat()\n",
-    "                filepath = os.path.join(\n",
-    "                    mdata.calibration_constant_version.hdf5path,\n",
-    "                    mdata.calibration_constant_version.filename\n",
-    "                )\n",
-    "                h5path = mdata.calibration_constant_version.h5path\n",
-    "                old_mdata[cap][qm][const] = {\n",
-    "                    \"timestamp\": timestamp,\n",
-    "                    \"filepath\": filepath,\n",
-    "                    \"h5path\": h5path\n",
-    "                }\n",
-    "\n",
-    "        with open(f\"{out_folder}/module_metadata_{qm}.yml\",\"w\") as fd:\n",
-    "            yaml.safe_dump(\n",
-    "                {\n",
-    "                    \"module\": qm,\n",
-    "                    \"pdu\": qm_db[\"db_module\"],\n",
-    "                    \"old-constants\": old_mdata[cap][qm]\n",
-    "                }, fd)"
+    "old_const = {}\n",
+    "old_mdata = {}\n",
+    "for qm in offset_g.keys():\n",
+    "    old_const[qm] = {}\n",
+    "    old_mdata[qm] = {}\n",
+    "    qm_db = qm_dict[qm]\n",
+    "    karabo_da = qm_db[\"karabo_da\"]\n",
+    "    cellid_pattern = cellid_patterns_g[qm]\n",
+    "\n",
+    "    condition = Conditions.Dark.LPD(\n",
+    "        memory_cells=max_cells,\n",
+    "        bias_voltage=bias_voltage,\n",
+    "        capacitor=capacitor_setting_s,\n",
+    "        memory_cell_order=make_cell_order_condition(inject_cell_order, cellid_pattern),\n",
+    "    )\n",
+    "    for const in clist:\n",
+    "        constant = getattr(Constants.LPD, const)()\n",
+    "        if not qm_db[\"db_module\"]:\n",
+    "            # This should be used in case of running notebook\n",
+    "            # by a different method other than myMDC which already\n",
+    "            # sends CalCat info.\n",
+    "            qm_db[\"db_module\"] = get_pdu_from_db(karabo_id, [karabo_da], constant,\n",
+    "                                                 condition, cal_db_interface,\n",
+    "                                                 snapshot_at=creation_time)[0]\n",
+    "\n",
+    "        data, mdata = get_from_db(karabo_id, karabo_da,\n",
+    "                                  constant,\n",
+    "                                  condition, None,\n",
+    "                                  cal_db_interface,\n",
+    "                                  creation_time=creation_time,\n",
+    "                                  verbosity=2, timeout=cal_db_timeout)\n",
+    "\n",
+    "        old_const[qm][const] = data\n",
+    "\n",
+    "        if mdata is None or data is None:\n",
+    "            old_mdata[qm][const] = {\n",
+    "                \"timestamp\": \"Not found\",\n",
+    "                \"filepath\": None,\n",
+    "                \"h5path\": None\n",
+    "            }\n",
+    "        else:\n",
+    "            timestamp = mdata.calibration_constant_version.begin_at.isoformat()\n",
+    "            filepath = Path(\n",
+    "                mdata.calibration_constant_version.hdf5path,\n",
+    "                mdata.calibration_constant_version.filename\n",
+    "            )\n",
+    "            h5path = mdata.calibration_constant_version.h5path\n",
+    "            old_mdata[qm][const] = {\n",
+    "                \"timestamp\": timestamp,\n",
+    "                \"filepath\": str(filepath),\n",
+    "                \"h5path\": h5path\n",
+    "            }\n",
+    "\n",
+    "    with open(f\"{out_folder}/module_metadata_{qm}.yml\",\"w\") as fd:\n",
+    "        yaml.safe_dump(\n",
+    "            {\n",
+    "                \"module\": qm,\n",
+    "                \"pdu\": qm_db[\"db_module\"],\n",
+    "                \"old-constants\": old_mdata[qm]\n",
+    "            }, fd)"
    ]
   },
   {
@@ -484,16 +435,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "res = OrderedDict()\n",
-    "for cap in capacitor_settings:\n",
-    "    res[cap] = OrderedDict()\n",
-    "    for i in modules:\n",
-    "        qm = module_index_to_qm(i)\n",
+    "res = {}\n",
+    "for i in modules:\n",
+    "    qm = module_index_to_qm(i)\n",
     "\n",
-    "        res[cap][qm] = {'Offset': offset_g[cap][qm],\n",
-    "                        'Noise': noise_g[cap][qm],\n",
-    "                        'BadPixelsDark': badpix_g[cap][qm]\n",
-    "                        }"
+    "    res[qm] = {'Offset': offset_g[qm],\n",
+    "               'Noise': noise_g[qm],\n",
+    "               'BadPixelsDark': badpix_g[qm]\n",
+    "              }"
    ]
   },
   {
@@ -504,54 +453,53 @@
    "source": [
     "# Save constants in the calibration DB\n",
     "md = None\n",
-    "for cap in capacitor_settings:\n",
-    "    for qm in res[cap]:\n",
-    "\n",
-    "        karabo_da = qm_dict[qm][\"karabo_da\"]\n",
-    "        db_module = qm_dict[qm][\"db_module\"]\n",
-    "        cellid_pattern = cellid_patterns_g[cap][qm]\n",
-    "        if inject_cell_order:\n",
-    "            mem_cell_order = \",\".join([str(c) for c in cellid_pattern]) + \",\"\n",
-    "        else:\n",
-    "            mem_cell_order = None\n",
-    "\n",
-    "        # Do not store empty constants\n",
-    "        # In case of 0 trains data_g is initiated with nans and never refilled.\n",
-    "        if np.count_nonzero(~np.isnan(data_g[cap][qm]))==0:\n",
-    "            print(f\"Constant ({cap}, {qm}) would be empty, skipping saving\")\n",
-    "            continue\n",
-    "\n",
-    "        for const in res[cap][qm]:\n",
-    "\n",
-    "            dconst = getattr(Constants.LPD, const)()\n",
-    "            dconst.data = res[cap][qm][const]\n",
-    "\n",
-    "            # set the operating condition\n",
-    "\n",
-    "            condition = Conditions.Dark.LPD(memory_cells=max_cells,\n",
-    "                                        bias_voltage=bias_voltage,\n",
-    "                                        capacitor=cap,\n",
-    "                                        memory_cell_order=mem_cell_order,\n",
-    "                                       )\n",
-    "\n",
-    "            if db_output:\n",
-    "                md = send_to_db(db_module, karabo_id, dconst, condition,\n",
-    "                                file_loc, report_path=report,\n",
-    "                                cal_db_interface=cal_db_interface,\n",
-    "                                creation_time=creation_time,\n",
-    "                                timeout=cal_db_timeout)\n",
-    "\n",
-    "            if local_output:\n",
-    "                md = save_const_to_h5(db_module, karabo_id, dconst, condition,\n",
-    "                                      dconst.data, file_loc, report, creation_time, out_folder)\n",
-    "                print(f\"Calibration constant {const} is stored locally.\\n\")\n",
-    "\n",
-    "        print(\"Constants parameter conditions are:\\n\")\n",
-    "        print(f\"• memory_cells: {max_cells}\\n\"\n",
-    "              f\"• bias_voltage: {bias_voltage}\\n\"\n",
-    "              f\"• capacitor: {cap}\\n\"\n",
-    "              f\"• memory cell order: {mem_cell_order}\\n\"\n",
-    "              f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")"
+    "\n",
+    "for qm in res:\n",
+    "\n",
+    "    karabo_da = qm_dict[qm][\"karabo_da\"]\n",
+    "    db_module = qm_dict[qm][\"db_module\"]\n",
+    "    mem_cell_order = make_cell_order_condition(\n",
+    "        inject_cell_order, cellid_patterns_g[qm]\n",
+    "    )\n",
+    "    print(\"Memory cell order:\", mem_cell_order)\n",
+    "\n",
+    "    # Do not store empty constants\n",
+    "    # In case of 0 trains data_g is initiated with nans and never refilled.\n",
+    "    if np.count_nonzero(~np.isnan(data_g[qm]))==0:\n",
+    "        print(f\"Constant ({qm}) would be empty, skipping saving\")\n",
+    "        continue\n",
+    "\n",
+    "    for const in res[qm]:\n",
+    "\n",
+    "        dconst = getattr(Constants.LPD, const)()\n",
+    "        dconst.data = res[qm][const]\n",
+    "\n",
+    "        # set the operating condition\n",
+    "\n",
+    "        condition = Conditions.Dark.LPD(memory_cells=max_cells,\n",
+    "                                    bias_voltage=bias_voltage,\n",
+    "                                    capacitor=capacitor_setting_s,\n",
+    "                                    memory_cell_order=mem_cell_order,\n",
+    "                                   )\n",
+    "\n",
+    "        if db_output:\n",
+    "            md = send_to_db(db_module, karabo_id, dconst, condition,\n",
+    "                            file_loc, report_path=report,\n",
+    "                            cal_db_interface=cal_db_interface,\n",
+    "                            creation_time=creation_time,\n",
+    "                            timeout=cal_db_timeout)\n",
+    "\n",
+    "        if local_output:\n",
+    "            md = save_const_to_h5(db_module, karabo_id, dconst, condition,\n",
+    "                                  dconst.data, file_loc, report, creation_time, out_folder)\n",
+    "            print(f\"Calibration constant {const} is stored locally.\\n\")\n",
+    "\n",
+    "    print(\"Constants parameter conditions are:\\n\")\n",
+    "    print(f\"• memory_cells: {max_cells}\\n\"\n",
+    "          f\"• bias_voltage: {bias_voltage}\\n\"\n",
+    "          f\"• capacitor: {capacitor_setting_s}\\n\"\n",
+    "          f\"• memory cell order: {mem_cell_order}\\n\"\n",
+    "          f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")"
    ]
   },
   {
@@ -586,77 +534,76 @@
     "fig, grid = plt.subplots(3, 1, sharex=\"col\", sharey=\"row\", figsize=(10, 7))\n",
     "fig.subplots_adjust(wspace=0, hspace=0)\n",
     "\n",
-    "for cap in capacitor_settings:\n",
-    "    for i in modules:\n",
-    "        qm = module_index_to_qm(i)\n",
-    "        if np.count_nonzero(~np.isnan(data_g[cap][qm])) == 0:\n",
-    "            break\n",
-    "        for gain in range(3):\n",
-    "            data = data_g[cap][qm][:, gain]\n",
-    "            offset = np.nanmedian(data)\n",
-    "            noise = np.nanstd(data)\n",
-    "            xrange = [np.nanmin(data_g[cap][qm]), np.nanmax(data_g[cap][qm])]\n",
-    "            if xrange[1] == xrange[0]:\n",
-    "                xrange = [0, xrange[0]+xrange[0]//2]\n",
-    "                nbins = data_g[cap][qm].shape[0]\n",
-    "            else:\n",
-    "                nbins = int(xrange[1] - xrange[0])\n",
-    "\n",
-    "            hn, cn = np.histogram(data, bins=nbins, range=xrange)\n",
-    "\n",
-    "            grid[gain].hist(data, range=xrange, bins=nbins)\n",
-    "            grid[gain].plot([offset-noise, offset-noise], [0, np.nanmax(hn)], \n",
-    "                            linewidth=1.5, color='red',\n",
-    "                            label='1 $\\sigma$ deviation')\n",
-    "            grid[gain].plot([offset+noise, offset+noise],\n",
-    "                            [0, np.nanmax(hn)], linewidth=1.5, color='red')\n",
-    "            grid[gain].plot([offset, offset], [0, 0],\n",
-    "                            linewidth=1.5, color='y', label='median')\n",
-    "\n",
-    "            grid[gain].plot([np.nanmedian(offset_g[cap][qm][:, :, 12, gain]), \n",
-    "                             np.nanmedian(offset_g[cap][qm][:, :, 12, gain])],\n",
-    "                            [0, np.nanmax(hn)], linewidth=1.5, color='green', \n",
-    "                            label='average over pixels')\n",
-    "\n",
-    "            grid[gain].set_xlim(xrange)\n",
-    "            grid[gain].set_ylim(0, np.nanmax(hn)*1.1)\n",
-    "            grid[gain].set_xlabel(\"Offset value [ADU]\")\n",
-    "            grid[gain].set_ylabel(\"# of occurance\")\n",
-    "\n",
-    "            if gain == 0:\n",
-    "                leg = grid[gain].legend(\n",
-    "                    loc='upper center', ncol=3, \n",
-    "                    bbox_to_anchor=(0.1, 0.25, 0.7, 1.0))\n",
-    "\n",
-    "            grid[gain].text(820, np.nanmax(hn)*0.4,\n",
-    "                            \"{} gain\".format(gain_names[gain]), fontsize=20)\n",
-    "\n",
-    "            a = plt.axes([.125, .1, 0.775, .8], frame_on=False)\n",
-    "            a.patch.set_alpha(0.05)\n",
-    "            a.set_xlim(xrange)\n",
-    "            plt.plot([offset, offset], [0, 1], linewidth=1.5, color='y')\n",
-    "            plt.xticks([])\n",
-    "            plt.yticks([])\n",
-    "\n",
-    "        ypos = 0.9\n",
-    "        x1pos = (np.nanmedian(data_g[cap][qm][:, 0]) +\n",
-    "                 np.nanmedian(data_g[cap][qm][:, 2]))/2.\n",
-    "        x2pos = (np.nanmedian(data_g[cap][qm][:, 2]) +\n",
-    "                 np.nanmedian(data_g[cap][qm][:, 1]))/2.-10\n",
-    "\n",
-    "        plt.annotate(\"\", xy=(np.nanmedian(data_g[cap][qm][:, 0]), ypos), xycoords='data',\n",
-    "                     xytext=(np.nanmedian(data_g[cap][qm][:, 2]), ypos), textcoords='data',\n",
-    "                     arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n",
-    "\n",
-    "        plt.annotate('{}'.format(np.nanmedian(data_g[cap][qm][:, 0])-np.nanmedian(data_g[cap][qm][:, 2])),\n",
-    "                     xy=(x1pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n",
-    "\n",
-    "        plt.annotate(\"\", xy=(np.nanmedian(data_g[cap][qm][:, 2]), ypos), xycoords='data',\n",
-    "                     xytext=(np.nanmedian(data_g[cap][qm][:, 1]), ypos), textcoords='data',\n",
-    "                     arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n",
-    "\n",
-    "        plt.annotate('{}'.format(np.nanmedian(data_g[cap][qm][:, 2])-np.nanmedian(data_g[cap][qm][:, 1])),\n",
-    "                     xy=(x2pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n",
+    "for i in modules:\n",
+    "    qm = module_index_to_qm(i)\n",
+    "    if np.count_nonzero(~np.isnan(data_g[qm])) == 0:\n",
+    "        break\n",
+    "    for gain in range(3):\n",
+    "        data = data_g[qm][:, gain]\n",
+    "        offset = np.nanmedian(data)\n",
+    "        noise = np.nanstd(data)\n",
+    "        xrange = [np.nanmin(data_g[qm]), np.nanmax(data_g[qm])]\n",
+    "        if xrange[1] == xrange[0]:\n",
+    "            xrange = [0, xrange[0]+xrange[0]//2]\n",
+    "            nbins = data_g[qm].shape[0]\n",
+    "        else:\n",
+    "            nbins = int(xrange[1] - xrange[0])\n",
+    "\n",
+    "        hn, cn = np.histogram(data, bins=nbins, range=xrange)\n",
+    "\n",
+    "        grid[gain].hist(data, range=xrange, bins=nbins)\n",
+    "        grid[gain].plot([offset-noise, offset-noise], [0, np.nanmax(hn)], \n",
+    "                        linewidth=1.5, color='red',\n",
+    "                        label='1 $\\sigma$ deviation')\n",
+    "        grid[gain].plot([offset+noise, offset+noise],\n",
+    "                        [0, np.nanmax(hn)], linewidth=1.5, color='red')\n",
+    "        grid[gain].plot([offset, offset], [0, 0],\n",
+    "                        linewidth=1.5, color='y', label='median')\n",
+    "\n",
+    "        grid[gain].plot([np.nanmedian(offset_g[qm][:, :, 12, gain]), \n",
+    "                         np.nanmedian(offset_g[qm][:, :, 12, gain])],\n",
+    "                        [0, np.nanmax(hn)], linewidth=1.5, color='green', \n",
+    "                        label='average over pixels')\n",
+    "\n",
+    "        grid[gain].set_xlim(xrange)\n",
+    "        grid[gain].set_ylim(0, np.nanmax(hn)*1.1)\n",
+    "        grid[gain].set_xlabel(\"Offset value [ADU]\")\n",
+    "        grid[gain].set_ylabel(\"# of occurance\")\n",
+    "\n",
+    "        if gain == 0:\n",
+    "            leg = grid[gain].legend(\n",
+    "                loc='upper center', ncol=3, \n",
+    "                bbox_to_anchor=(0.1, 0.25, 0.7, 1.0))\n",
+    "\n",
+    "        grid[gain].text(820, np.nanmax(hn)*0.4,\n",
+    "                        \"{} gain\".format(gain_names[gain]), fontsize=20)\n",
+    "\n",
+    "        a = plt.axes([.125, .1, 0.775, .8], frame_on=False)\n",
+    "        a.patch.set_alpha(0.05)\n",
+    "        a.set_xlim(xrange)\n",
+    "        plt.plot([offset, offset], [0, 1], linewidth=1.5, color='y')\n",
+    "        plt.xticks([])\n",
+    "        plt.yticks([])\n",
+    "\n",
+    "    ypos = 0.9\n",
+    "    x1pos = (np.nanmedian(data_g[qm][:, 0]) +\n",
+    "             np.nanmedian(data_g[qm][:, 2]))/2.\n",
+    "    x2pos = (np.nanmedian(data_g[qm][:, 2]) +\n",
+    "             np.nanmedian(data_g[qm][:, 1]))/2.-10\n",
+    "\n",
+    "    plt.annotate(\"\", xy=(np.nanmedian(data_g[qm][:, 0]), ypos), xycoords='data',\n",
+    "                 xytext=(np.nanmedian(data_g[qm][:, 2]), ypos), textcoords='data',\n",
+    "                 arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n",
+    "\n",
+    "    plt.annotate('{}'.format(np.nanmedian(data_g[qm][:, 0])-np.nanmedian(data_g[qm][:, 2])),\n",
+    "                 xy=(x1pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n",
+    "\n",
+    "    plt.annotate(\"\", xy=(np.nanmedian(data_g[qm][:, 2]), ypos), xycoords='data',\n",
+    "                 xytext=(np.nanmedian(data_g[qm][:, 1]), ypos), textcoords='data',\n",
+    "                 arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n",
+    "\n",
+    "    plt.annotate('{}'.format(np.nanmedian(data_g[qm][:, 2])-np.nanmedian(data_g[qm][:, 1])),\n",
+    "                 xy=(x2pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n",
     "\n",
     "plt.show()"
    ]
@@ -676,16 +623,16 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Loop over capacitor settings, modules, constants\n",
-    "for cap in capacitor_settings:\n",
-    "    if not test_for_normality:\n",
-    "        print('Normality test was not requested. Flag `test_for_normality` False')\n",
-    "        break\n",
+    "# Loop over modules, constants\n",
+    "\n",
+    "if not test_for_normality:\n",
+    "    print('Normality test was not requested. Flag `test_for_normality` False')\n",
+    "else:\n",
     "    for i in modules:\n",
     "        qm = module_index_to_qm(i)\n",
     "\n",
-    "        data = np.copy(ntest_g[cap][qm][:,:,:,:])\n",
-    "        data[badpix_g[cap][qm][:,:,:,:]>0] = 1.01\n",
+    "        data = np.copy(ntest_g[qm][:,:,:,:])\n",
+    "        data[badpix_g[qm][:,:,:,:]>0] = 1.01\n",
     "            \n",
     "        hn,cn = np.histogram(data[:,:,:,0], bins=100)\n",
     "       \n",
@@ -748,84 +695,86 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "scrolled": false
+   },
    "outputs": [],
    "source": [
     "cell = 12\n",
-    "for cap in capacitor_settings:\n",
-    "    for gain in range(3):\n",
-    "        display(\n",
-    "            Markdown('### Cell-12 overview - {} gain'.format(gain_names[gain])))\n",
-    "\n",
-    "        fig = plt.figure(figsize=(18, 22) , tight_layout={'pad': 0.1, 'w_pad': 0.1})\n",
-    "        for qm in res[cap]:\n",
-    "            for iconst, const in enumerate(['Offset', 'Noise', 'BadPixelsDark']):\n",
-    "\n",
-    "                ax = fig.add_subplot(321+iconst)\n",
     "\n",
-    "                data = res[cap][qm][const][:, :, 12, gain]\n",
-    "                vmax = 1.5 * np.nanmedian(res[cap][qm][const][:, :, 12, gain])\n",
-    "                title = const\n",
-    "                label = '{} value [ADU]'.format(const)\n",
-    "                title = '{} value'.format(const)\n",
-    "                if const == 'BadPixelsDark':\n",
-    "                    vmax = 4\n",
-    "                    bpix_code = data.astype(np.float32)\n",
-    "                    bpix_code[bpix_code == 0] = np.nan\n",
-    "                    title = 'Bad pixel code'\n",
-    "                    label = title\n",
-    "\n",
-    "                    cb_labels = ['1 {}'.format(BadPixels.NOISE_OUT_OF_THRESHOLD.name),\n",
-    "                                 '2 {}'.format(BadPixels.OFFSET_NOISE_EVAL_ERROR.name),\n",
-    "                                 '3 {}'.format(BadPixels.OFFSET_OUT_OF_THRESHOLD.name),\n",
-    "                                 '4 {}'.format('MIXED')]\n",
-    "\n",
-    "                    heatmapPlot(bpix_code, add_panels=False, cmap='viridis',\n",
-    "                                y_label='Rows', x_label='Columns',\n",
-    "                                lut_label='', vmax=vmax,\n",
-    "                                use_axis=ax, cb_ticklabels=cb_labels, cb_ticks = np.arange(4)+1,\n",
-    "                                title='{}'.format(title))\n",
-    "                    del bpix_code\n",
-    "                else:\n",
+    "for gain in range(3):\n",
+    "    display(\n",
+    "        Markdown('### Cell-12 overview - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "    fig = plt.figure(figsize=(18, 22) , tight_layout={'pad': 0.1, 'w_pad': 0.1})\n",
+    "    for qm in res:\n",
+    "        for iconst, const in enumerate(['Offset', 'Noise', 'BadPixelsDark']):\n",
+    "\n",
+    "            ax = fig.add_subplot(321+iconst)\n",
+    "\n",
+    "            data = res[qm][const][:, :, 12, gain]\n",
+    "            vmax = 1.5 * np.nanmedian(res[qm][const][:, :, 12, gain])\n",
+    "            title = const\n",
+    "            label = '{} value [ADU]'.format(const)\n",
+    "            title = '{} value'.format(const)\n",
+    "            if const == 'BadPixelsDark':\n",
+    "                vmax = 4\n",
+    "                bpix_code = data.astype(np.float32)\n",
+    "                bpix_code[bpix_code == 0] = np.nan\n",
+    "                title = 'Bad pixel code'\n",
+    "                label = title\n",
+    "\n",
+    "                cb_labels = ['1 {}'.format(BadPixels.NOISE_OUT_OF_THRESHOLD.name),\n",
+    "                             '2 {}'.format(BadPixels.OFFSET_NOISE_EVAL_ERROR.name),\n",
+    "                             '3 {}'.format(BadPixels.OFFSET_OUT_OF_THRESHOLD.name),\n",
+    "                             '4 {}'.format('MIXED')]\n",
+    "\n",
+    "                heatmapPlot(bpix_code, add_panels=False, cmap='viridis',\n",
+    "                            y_label='Rows', x_label='Columns',\n",
+    "                            lut_label='', vmax=vmax,\n",
+    "                            use_axis=ax, cb_ticklabels=cb_labels, cb_ticks = np.arange(4)+1,\n",
+    "                            title='{}'.format(title))\n",
+    "                del bpix_code\n",
+    "            else:\n",
     "\n",
-    "                    heatmapPlot(data, add_panels=False, cmap='viridis',\n",
-    "                                y_label='Rows', x_label='Columns',\n",
-    "                                lut_label=label, vmax=vmax,\n",
+    "                heatmapPlot(data, add_panels=False, cmap='viridis',\n",
+    "                            y_label='Rows', x_label='Columns',\n",
+    "                            lut_label=label, vmax=vmax,\n",
+    "                            use_axis=ax,\n",
+    "                            title='{}'.format(title))\n",
+    "\n",
+    "    for qm in res:\n",
+    "        for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "            data = res[qm][const]\n",
+    "            dataBP = np.copy(data)\n",
+    "            dataBP[res[qm]['BadPixelsDark'] > 0] = -1\n",
+    "\n",
+    "            x_ranges = [[0, 1500], [0, 40]]\n",
+    "            hn, cn = np.histogram(\n",
+    "                data[:, :, :, gain], bins=100, range=x_ranges[iconst])\n",
+    "            hnBP, cnBP = np.histogram(dataBP[:, :, :, gain], bins=cn)\n",
+    "\n",
+    "            d = [{'x': cn[:-1],\n",
+    "                  'y': hn,\n",
+    "                  'drawstyle': 'steps-pre',\n",
+    "                  'label': 'All data',\n",
+    "                  },\n",
+    "                 {'x': cnBP[:-1],\n",
+    "                  'y': hnBP,\n",
+    "                  'drawstyle': 'steps-pre',\n",
+    "                  'label': 'Bad pixels masked',\n",
+    "                  },\n",
+    "                 ]\n",
+    "\n",
+    "            ax = fig.add_subplot(325+iconst)\n",
+    "            _ = simplePlot(d, figsize=(5, 7), aspect=1,\n",
+    "                                x_label=\"{} value [ADU]\".format(const),\n",
+    "                                y_label=\"# of occurance\",\n",
+    "                                title='', legend_pad=0.1, legend_size='10%',\n",
     "                                use_axis=ax,\n",
-    "                                title='{}'.format(title))\n",
-    "\n",
-    "        for qm in res[cap]:\n",
-    "            for iconst, const in enumerate(['Offset', 'Noise']):\n",
-    "                data = res[cap][qm][const]\n",
-    "                dataBP = np.copy(data)\n",
-    "                dataBP[res[cap][qm]['BadPixelsDark'] > 0] = -1\n",
-    "\n",
-    "                x_ranges = [[0, 1500], [0, 40]]\n",
-    "                hn, cn = np.histogram(\n",
-    "                    data[:, :, :, gain], bins=100, range=x_ranges[iconst])\n",
-    "                hnBP, cnBP = np.histogram(dataBP[:, :, :, gain], bins=cn)\n",
-    "\n",
-    "                d = [{'x': cn[:-1],\n",
-    "                      'y': hn,\n",
-    "                      'drawstyle': 'steps-pre',\n",
-    "                      'label': 'All data',\n",
-    "                      },\n",
-    "                     {'x': cnBP[:-1],\n",
-    "                      'y': hnBP,\n",
-    "                      'drawstyle': 'steps-pre',\n",
-    "                      'label': 'Bad pixels masked',\n",
-    "                      },\n",
-    "                     ]\n",
-    "\n",
-    "                ax = fig.add_subplot(325+iconst)\n",
-    "                _ = simplePlot(d, figsize=(5, 7), aspect=1,\n",
-    "                                    x_label=\"{} value [ADU]\".format(const),\n",
-    "                                    y_label=\"# of occurance\",\n",
-    "                                    title='', legend_pad=0.1, legend_size='10%',\n",
-    "                                    use_axis=ax,\n",
-    "                                    y_log=True, legend='outside-top-2col-frame')\n",
+    "                                y_log=True, legend='outside-top-2col-frame')\n",
     "\n",
-    "        plt.show()"
+    "    plt.show()"
    ]
   },
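Review note: the pattern in the cell above — histogram all values once, then again with bad pixels pushed to a sentinel below the plotted range so both curves share the same bins — is worth seeing in isolation. A minimal sketch with synthetic data (shapes and ranges are made up for illustration):

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
const = rng.normal(1000, 20, size=(256, 256))   # synthetic "Offset"-like map
badpix = rng.random((256, 256)) < 0.01          # ~1% of pixels flagged

masked = const.copy()
masked[badpix] = -1                             # sentinel below the histogram range

hn, edges = np.histogram(const, bins=100, range=(900, 1100))
hnBP, _ = np.histogram(masked, bins=edges)      # reuse edges so the curves align

plt.step(edges[:-1], hn, where='pre', label='All data')
plt.step(edges[:-1], hnBP, where='pre', label='Bad pixels masked')
plt.yscale('log')
plt.legend()
plt.show()
```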
   {
@@ -870,12 +819,11 @@
     "    rebin = 2\n",
     "    for gain in range(3):\n",
     "        display(Markdown('### Bad pixel behaviour - {} gain ###'.format(gain_names[gain])))\n",
-    "        for cap in capacitor_settings:\n",
-    "            for mod, data in badpix_g[cap].items():\n",
-    "                plot_badpix_3d(data[...,gain], cols, title='', rebin_fac=rebin)\n",
-    "                ax = plt.gca()\n",
-    "                leg = ax.get_legend()\n",
-    "                leg.set(alpha=0.5)\n",
+    "        for mod, data in badpix_g.items():\n",
+    "            plot_badpix_3d(data[...,gain], cols, title='', rebin_fac=rebin)\n",
+    "            ax = plt.gca()\n",
+    "            leg = ax.get_legend()\n",
+    "            leg.set(alpha=0.5)\n",
     "        plt.show()"
    ]
   },
@@ -904,91 +852,91 @@
    "outputs": [],
    "source": [
     "time_summary = []\n",
-    "for cap, cap_data in old_mdata.items():\n",
-    "    time_summary.append(f\"The following pre-existing constants are used for comparison for capacitor setting **{cap}**:\")\n",
-    "    for qm, qm_data in cap_data.items():\n",
-    "        time_summary.append(f\"- Module {qm}\")\n",
-    "        for const, const_data in qm_data.items():\n",
-    "            time_summary.append(f\"    - {const} created at {const_data['timestamp']}\")\n",
+    "time_summary.append(f\"The following pre-existing constants are used for comparison:\")\n",
+    "for qm, qm_data in old_mdata.items():\n",
+    "    time_summary.append(f\"- Module {qm}\")\n",
+    "    for const, const_data in qm_data.items():\n",
+    "        time_summary.append(f\"    - {const} created at {const_data['timestamp']}\")\n",
     "display(Markdown(\"\\n\".join(time_summary)))"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "scrolled": false
+   },
    "outputs": [],
    "source": [
-    "# Loop over capacitor settings, modules, constants\n",
-    "for cap in res:\n",
-    "    for qm in res[cap]:\n",
-    "        for gain in range(3):\n",
-    "            display(Markdown('### Summary across tiles - {} gain'.format(gain_names[gain])))\n",
+    "# Loop over modules, constants\n",
+    "for qm in res:\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Summary across tiles - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        for const in res[qm]:\n",
+    "            data = np.copy(res[qm][const][:, :, :, gain])\n",
+    "\n",
+    "            label = 'Fraction of bad pixels'\n",
+    "\n",
+    "            if const != 'BadPixelsDark':\n",
+    "                data[badpix_g[qm][:, :, :, gain] > 0] = np.nan\n",
+    "                label = '{} value [ADU]'.format(const)\n",
+    "            else:\n",
+    "                data[data>0] = 1.0\n",
     "\n",
-    "            for const in res[cap][qm]:\n",
-    "                data = np.copy(res[cap][qm][const][:, :, :, gain])\n",
+    "            data = data.reshape(\n",
+    "                int(data.shape[0] / 32),\n",
+    "                32,\n",
+    "                int(data.shape[1] / 128),\n",
+    "                128,\n",
+    "                data.shape[2])\n",
+    "            data = np.nanmean(data, axis=(1, 3)).swapaxes(\n",
+    "                0, 2).reshape(512, 16)\n",
     "\n",
-    "                label = 'Fraction of bad pixels'\n",
+    "            fig = plt.figure(figsize=(15, 6))\n",
+    "            ax = fig.add_subplot(121)\n",
+    "\n",
+    "            _ = heatmapPlot(data[:510, :], add_panels=True,\n",
+    "                            y_label='Momery Cell ID', x_label='Tile ID',\n",
+    "                            lut_label=label, use_axis=ax,\n",
+    "                            panel_y_label=label, panel_x_label=label,\n",
+    "                            cmap='viridis',  # cb_loc='right',cb_aspect=15,\n",
+    "                            x_ticklabels=np.arange(16)+1,\n",
+    "                            x_ticks=np.arange(16)+0.5)\n",
+    "\n",
+    "            if old_const[qm][const] is not None:\n",
+    "                ax = fig.add_subplot(122)\n",
+    "\n",
+    "                dataold = np.copy(old_const[qm][const][:, :, :, gain])\n",
+    "\n",
+    "                label = '$\\Delta$ {}'.format(label)\n",
     "\n",
     "                if const != 'BadPixelsDark':\n",
-    "                    data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n",
-    "                    label = '{} value [ADU]'.format(const)\n",
+    "                    if old_const[qm]['BadPixelsDark'] is not None:\n",
+    "                        dataold[old_const[qm]['BadPixelsDark'][:, :, :, gain] > 0] = np.nan\n",
+    "                    else:\n",
+    "                        dataold[:] = np.nan\n",
     "                else:\n",
-    "                    data[data>0] = 1.0\n",
-    "                    \n",
-    "                data = data.reshape(\n",
-    "                    int(data.shape[0] / 32),\n",
+    "                    dataold[dataold>0]=1.0\n",
+    "\n",
+    "                dataold = dataold.reshape(\n",
+    "                    int(dataold.shape[0] / 32),\n",
     "                    32,\n",
-    "                    int(data.shape[1] / 128),\n",
+    "                    int(dataold.shape[1] / 128),\n",
     "                    128,\n",
-    "                    data.shape[2])\n",
-    "                data = np.nanmean(data, axis=(1, 3)).swapaxes(\n",
-    "                    0, 2).reshape(512, 16)\n",
+    "                    dataold.shape[2])\n",
+    "                dataold = np.nanmean(dataold, axis=(\n",
+    "                    1, 3)).swapaxes(0, 2).reshape(512, 16)\n",
+    "                dataold = dataold - data\n",
     "\n",
-    "                fig = plt.figure(figsize=(15, 6))\n",
-    "                ax = fig.add_subplot(121)\n",
-    "\n",
-    "                _ = heatmapPlot(data[:510, :], add_panels=True,\n",
+    "                _ = heatmapPlot(dataold[:510, :], add_panels=True,\n",
     "                                y_label='Momery Cell ID', x_label='Tile ID',\n",
     "                                lut_label=label, use_axis=ax,\n",
     "                                panel_y_label=label, panel_x_label=label,\n",
     "                                cmap='viridis',  # cb_loc='right',cb_aspect=15,\n",
     "                                x_ticklabels=np.arange(16)+1,\n",
     "                                x_ticks=np.arange(16)+0.5)\n",
-    "\n",
-    "                if old_const[cap][qm][const] is not None:\n",
-    "                    ax = fig.add_subplot(122)\n",
-    "\n",
-    "                    dataold = np.copy(old_const[cap][qm][const][:, :, :, gain])\n",
-    "                    \n",
-    "                    label = '$\\Delta$ {}'.format(label)\n",
-    "\n",
-    "                    if const != 'BadPixelsDark':\n",
-    "                        if old_const[cap][qm]['BadPixelsDark'] is not None:\n",
-    "                            dataold[old_const[cap][qm]['BadPixelsDark'][:, :, :, gain] > 0] = np.nan\n",
-    "                        else:\n",
-    "                            dataold[:] = np.nan\n",
-    "                    else:\n",
-    "                        dataold[dataold>0]=1.0\n",
-    "\n",
-    "                    dataold = dataold.reshape(\n",
-    "                        int(dataold.shape[0] / 32),\n",
-    "                        32,\n",
-    "                        int(dataold.shape[1] / 128),\n",
-    "                        128,\n",
-    "                        dataold.shape[2])\n",
-    "                    dataold = np.nanmean(dataold, axis=(\n",
-    "                        1, 3)).swapaxes(0, 2).reshape(512, 16)\n",
-    "                    dataold = dataold - data\n",
-    "\n",
-    "                    _ = heatmapPlot(dataold[:510, :], add_panels=True,\n",
-    "                                    y_label='Momery Cell ID', x_label='Tile ID',\n",
-    "                                    lut_label=label, use_axis=ax,\n",
-    "                                    panel_y_label=label, panel_x_label=label,\n",
-    "                                    cmap='viridis',  # cb_loc='right',cb_aspect=15,\n",
-    "                                    x_ticklabels=np.arange(16)+1,\n",
-    "                                    x_ticks=np.arange(16)+0.5)\n",
-    "            plt.show()"
+    "        plt.show()"
    ]
   },
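Review note: the reshape/`nanmean` chain above is the core of the per-tile summary — it folds the pixel axes of a `(rows, cols, cells)` constant into a grid of tiles and averages within each tile. A standalone sketch, assuming the 256×256 supermodule is tiled as 8×2 blocks of 32×128 pixels, as the hard-coded shapes imply:

```python
import numpy as np

rng = np.random.default_rng(1)
data = rng.normal(size=(256, 256, 512))                    # (rows, cols, memory cells)

# Fold the pixel axes into an 8x2 grid of 32x128 tiles, average within each tile.
tiled = data.reshape(256 // 32, 32, 256 // 128, 128, 512)  # (8, 32, 2, 128, 512)
per_tile = np.nanmean(tiled, axis=(1, 3))                  # (8, 2, 512)

# Reorder to (cells, tiles) for the heatmap: 16 tiles = 8 x 2.
per_tile = per_tile.swapaxes(0, 2).reshape(512, 16)
print(per_tile.shape)  # (512, 16)
```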
   {
@@ -1015,31 +963,30 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Loop over capacitor settings, modules, constants\n",
-    "for cap in res:\n",
-    "    for qm in res[cap]:\n",
-    "        for gain in range(3):\n",
-    "            display(Markdown('### Variation of offset and noise across ASICs - {} gain'.format(gain_names[gain])))\n",
-    "\n",
-    "            fig = plt.figure(figsize=(15, 6))\n",
-    "            for iconst, const in enumerate(['Offset', 'Noise']):\n",
-    "                data = np.copy(res[cap][qm][const][:, :, :, gain])\n",
-    "                data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n",
-    "                label = '$\\sigma$ {} [ADU]'.format(const)\n",
-    "\n",
-    "                dataA = np.nanmean(data, axis=2)  # average over cells\n",
-    "                dataA = dataA.reshape(8, 32, 16, 16)\n",
-    "                dataA = np.nanstd(dataA, axis=(0, 2))  # average across ASICs\n",
-    "\n",
-    "                ax = fig.add_subplot(121+iconst)\n",
-    "                _ = heatmapPlot(dataA, add_panels=True,\n",
-    "                                y_label='rows', x_label='columns',\n",
-    "                                lut_label=label, use_axis=ax,\n",
-    "                                panel_y_label=label, panel_x_label=label,\n",
-    "                                cmap='viridis'\n",
-    "                                )\n",
+    "# Loop over modules, constants\n",
+    "for qm in res:\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Variation of offset and noise across ASICs - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        fig = plt.figure(figsize=(15, 6))\n",
+    "        for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "            data = np.copy(res[qm][const][:, :, :, gain])\n",
+    "            data[badpix_g[qm][:, :, :, gain] > 0] = np.nan\n",
+    "            label = '$\\sigma$ {} [ADU]'.format(const)\n",
+    "\n",
+    "            dataA = np.nanmean(data, axis=2)  # average over cells\n",
+    "            dataA = dataA.reshape(8, 32, 16, 16)\n",
+    "            dataA = np.nanstd(dataA, axis=(0, 2))  # average across ASICs\n",
+    "\n",
+    "            ax = fig.add_subplot(121+iconst)\n",
+    "            _ = heatmapPlot(dataA, add_panels=True,\n",
+    "                            y_label='rows', x_label='columns',\n",
+    "                            lut_label=label, use_axis=ax,\n",
+    "                            panel_y_label=label, panel_x_label=label,\n",
+    "                            cmap='viridis'\n",
+    "                            )\n",
     "\n",
-    "            plt.show()"
+    "        plt.show()"
    ]
   },
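Review note: the ASIC-variation cell uses the same folding trick with a different reduction — `nanstd` over the grid axes leaves one ASIC-shaped map of the spread across ASICs. A sketch with random data, assuming 32×16-pixel ASICs arranged 8×16, as `reshape(8, 32, 16, 16)` implies:

```python
import numpy as np

rng = np.random.default_rng(2)
data = rng.normal(size=(256, 256, 512))

per_pixel = np.nanmean(data, axis=2)       # average over memory cells -> (256, 256)
asics = per_pixel.reshape(8, 32, 16, 16)   # rows: 8 blocks of 32; cols: 16 blocks of 16
spread = np.nanstd(asics, axis=(0, 2))     # std over corresponding pixels across ASICs
print(spread.shape)  # (32, 16) -- one ASIC-shaped map
```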
   {
@@ -1048,34 +995,33 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Loop over capacitor settings, modules, constants\n",
-    "for cap in res:\n",
-    "    for qm in res[cap]:\n",
-    "        for gain in range(3):\n",
-    "            display(Markdown('### Variation of offset and noise across tiles - {} gain'.format(gain_names[gain])))\n",
-    "\n",
-    "            fig = plt.figure(figsize=(15, 6))\n",
-    "            for iconst, const in enumerate(['Offset', 'Noise']):\n",
-    "                data = np.copy(res[cap][qm][const][:, :, :, gain])\n",
-    "                data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n",
-    "                label = '$\\sigma$ {} [ADU]'.format(const)\n",
-    "                    \n",
-    "                dataT = data.reshape(\n",
-    "                    int(data.shape[0] / 32),\n",
-    "                    32,\n",
-    "                    int(data.shape[1] / 128),\n",
-    "                    128,\n",
-    "                    data.shape[2])\n",
-    "                dataT = np.nanstd(dataT, axis=(0, 2))\n",
-    "                dataT = np.nanmean(dataT, axis=2)\n",
-    "                \n",
-    "                ax = fig.add_subplot(121+iconst)\n",
-    "                _ = heatmapPlot(dataT, add_panels=True,\n",
-    "                                y_label='rows', x_label='columns',\n",
-    "                                lut_label=label, use_axis=ax,\n",
-    "                                panel_y_label=label, panel_x_label=label,\n",
-    "                                cmap='viridis')\n",
-    "            plt.show()"
+    "# Loop over modules, constants\n",
+    "for qm in res:\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Variation of offset and noise across tiles - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        fig = plt.figure(figsize=(15, 6))\n",
+    "        for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "            data = np.copy(res[qm][const][:, :, :, gain])\n",
+    "            data[badpix_g[qm][:, :, :, gain] > 0] = np.nan\n",
+    "            label = '$\\sigma$ {} [ADU]'.format(const)\n",
+    "\n",
+    "            dataT = data.reshape(\n",
+    "                int(data.shape[0] / 32),\n",
+    "                32,\n",
+    "                int(data.shape[1] / 128),\n",
+    "                128,\n",
+    "                data.shape[2])\n",
+    "            dataT = np.nanstd(dataT, axis=(0, 2))\n",
+    "            dataT = np.nanmean(dataT, axis=2)\n",
+    "\n",
+    "            ax = fig.add_subplot(121+iconst)\n",
+    "            _ = heatmapPlot(dataT, add_panels=True,\n",
+    "                            y_label='rows', x_label='columns',\n",
+    "                            lut_label=label, use_axis=ax,\n",
+    "                            panel_y_label=label, panel_x_label=label,\n",
+    "                            cmap='viridis')\n",
+    "        plt.show()"
    ]
   },
   {
@@ -1099,73 +1045,74 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "scrolled": false
+   },
    "outputs": [],
    "source": [
-    "# Loop over capacitor settings, modules, constants\n",
-    "for cap in res:\n",
-    "    for qm in res[cap]:\n",
-    "        for gain in range(3):\n",
-    "            display(Markdown('### Mean over pixels - {} gain'.format(gain_names[gain])))\n",
-    "            \n",
-    "            fig = plt.figure(figsize=(9,11))\n",
-    "\n",
-    "            for iconst, const in enumerate(res[cap][qm]):\n",
-    "\n",
-    "                ax = fig.add_subplot(311+iconst)\n",
-    "                    \n",
-    "                data = res[cap][qm][const][:,:,:510,gain]\n",
-    "                if const == 'BadPixelsDark':\n",
-    "                    data[data>0] = 1.0\n",
-    "                    \n",
-    "                dataBP = np.copy(data)\n",
-    "                dataBP[badpix_g[cap][qm][:,:,:510,gain]>0] = -10\n",
-    "\n",
-    "                data = np.nanmean(data, axis=(0,1))\n",
-    "                dataBP = np.nanmean(dataBP, axis=(0,1))\n",
-    "                \n",
-    "                d = [{'y': data,\n",
-    "                      'x': np.arange(data.shape[0]),\n",
-    "                      'drawstyle': 'steps-mid',\n",
-    "                      'label' : 'All data'\n",
-    "                     }\n",
-    "                    ]\n",
-    "                \n",
-    "                if const != 'BadPixelsDark':\n",
-    "                    d.append({'y': dataBP,\n",
-    "                      'x': np.arange(data.shape[0]),\n",
-    "                      'drawstyle': 'steps-mid',\n",
-    "                      'label' : 'good pixels only'\n",
-    "                     })\n",
-    "                    y_title = \"{} value [ADU]\".format(const)\n",
-    "                    title = \"{} value, {} gain\".format(const, gain_names[gain])\n",
-    "                else:\n",
-    "                    y_title = \"Fraction of Bad Pixels\"\n",
-    "                    title = \"Fraction of Bad Pixels, {} gain\".format(gain_names[gain])\n",
-    "                \n",
-    "                data_min = np.min([data, dataBP])if const != 'BadPixelsDark' else np.min([data])\n",
-    "                data_max = np.max([data[20:], dataBP[20:]])\n",
-    "                data_dif = data_max - data_min\n",
-    "                \n",
-    "                local_max = np.max([data[200:300], dataBP[200:300]])\n",
-    "                frac = 0.35\n",
-    "                new_max = (local_max - data_min*(1-frac))/frac\n",
-    "                new_max = np.max([data_max, new_max])\n",
-    "               \n",
-    "                _ = simplePlot(d, figsize=(10,10), aspect=2, xrange=(-12, 510),\n",
-    "                                  x_label = 'Memory Cell ID', \n",
-    "                                  y_label=y_title, use_axis=ax,\n",
-    "                                  title=title,\n",
-    "                                  title_position=[0.5, 1.15],  \n",
-    "                                  inset='xy-coord-right', inset_x_range=(0,20), inset_indicated=True,\n",
-    "                                  inset_labeled=True, inset_coord=[0.2,0.5,0.6,0.95],\n",
-    "                                    inset_lw = 1.0, y_range = [data_min-data_dif*0.05, new_max+data_dif*0.05],\n",
-    "                                  y_log=False, legend='outside-top-ncol2-frame', legend_size='18%',\n",
-    "                                     legend_pad=0.00)\n",
-    "                \n",
-    "                plt.tight_layout(pad=1.08, h_pad=0.35)\n",
-    "                \n",
-    "            plt.show()"
+    "# Loop over modules, constants\n",
+    "for qm in res:\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Mean over pixels - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        fig = plt.figure(figsize=(9,11))\n",
+    "\n",
+    "        for iconst, const in enumerate(res[qm]):\n",
+    "\n",
+    "            ax = fig.add_subplot(311+iconst)\n",
+    "\n",
+    "            data = res[qm][const][:,:,:510,gain]\n",
+    "            if const == 'BadPixelsDark':\n",
+    "                data[data>0] = 1.0\n",
+    "\n",
+    "            dataBP = np.copy(data)\n",
+    "            dataBP[badpix_g[qm][:,:,:510,gain]>0] = -10\n",
+    "\n",
+    "            data = np.nanmean(data, axis=(0,1))\n",
+    "            dataBP = np.nanmean(dataBP, axis=(0,1))\n",
+    "\n",
+    "            d = [{'y': data,\n",
+    "                  'x': np.arange(data.shape[0]),\n",
+    "                  'drawstyle': 'steps-mid',\n",
+    "                  'label' : 'All data'\n",
+    "                 }\n",
+    "                ]\n",
+    "\n",
+    "            if const != 'BadPixelsDark':\n",
+    "                d.append({'y': dataBP,\n",
+    "                  'x': np.arange(data.shape[0]),\n",
+    "                  'drawstyle': 'steps-mid',\n",
+    "                  'label' : 'good pixels only'\n",
+    "                 })\n",
+    "                y_title = \"{} value [ADU]\".format(const)\n",
+    "                title = \"{} value, {} gain\".format(const, gain_names[gain])\n",
+    "            else:\n",
+    "                y_title = \"Fraction of Bad Pixels\"\n",
+    "                title = \"Fraction of Bad Pixels, {} gain\".format(gain_names[gain])\n",
+    "\n",
+    "            data_min = np.min([data, dataBP])if const != 'BadPixelsDark' else np.min([data])\n",
+    "            data_max = np.max([data[20:], dataBP[20:]])\n",
+    "            data_dif = data_max - data_min\n",
+    "\n",
+    "            local_max = np.max([data[200:300], dataBP[200:300]])\n",
+    "            frac = 0.35\n",
+    "            new_max = (local_max - data_min*(1-frac))/frac\n",
+    "            new_max = np.max([data_max, new_max])\n",
+    "\n",
+    "            _ = simplePlot(d, figsize=(10,10), aspect=2, xrange=(-12, 510),\n",
+    "                              x_label = 'Memory Cell ID', \n",
+    "                              y_label=y_title, use_axis=ax,\n",
+    "                              title=title,\n",
+    "                              title_position=[0.5, 1.15],  \n",
+    "                              inset='xy-coord-right', inset_x_range=(0,20), inset_indicated=True,\n",
+    "                              inset_labeled=True, inset_coord=[0.2,0.5,0.6,0.95],\n",
+    "                                inset_lw = 1.0, y_range = [data_min-data_dif*0.05, new_max+data_dif*0.05],\n",
+    "                              y_log=False, legend='outside-top-ncol2-frame', legend_size='18%',\n",
+    "                                 legend_pad=0.00)\n",
+    "\n",
+    "            plt.tight_layout(pad=1.08, h_pad=0.35)\n",
+    "\n",
+    "        plt.show()"
    ]
   },
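Review note: the `new_max` expression in the cell above deserves a word — it solves for the axis maximum at which `local_max` sits at fraction `frac` of the visible range above `data_min`, leaving headroom for the inset. A quick check of the algebra with made-up numbers:

```python
# Solve (local_max - data_min) / (new_max - data_min) = frac for new_max:
#   new_max = data_min + (local_max - data_min) / frac
#           = (local_max - data_min * (1 - frac)) / frac
data_min, local_max, frac = 100.0, 450.0, 0.35
new_max = (local_max - data_min * (1 - frac)) / frac   # -> 1100.0
assert abs((local_max - data_min) / (new_max - data_min) - frac) < 1e-12
```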
   {
@@ -1193,42 +1140,46 @@
    "outputs": [],
    "source": [
     "table = []\n",
-    "bits = [BadPixels.NOISE_OUT_OF_THRESHOLD, BadPixels.OFFSET_OUT_OF_THRESHOLD, BadPixels.OFFSET_NOISE_EVAL_ERROR]\n",
-    "for cap in res:\n",
-    "    for qm in res[cap]:\n",
-    "        for gain in range(3):\n",
-    "            \n",
-    "            l_data = []\n",
-    "            l_data_old = []\n",
-    "            \n",
-    "            data = np.copy(res[cap][qm]['BadPixelsDark'][:,:,:,gain])\n",
-    "            l_data.append(len(data[data>0].flatten()))\n",
+    "bits = [\n",
+    "    BadPixels.NOISE_OUT_OF_THRESHOLD,\n",
+    "    BadPixels.OFFSET_OUT_OF_THRESHOLD,\n",
+    "    BadPixels.OFFSET_NOISE_EVAL_ERROR\n",
+    "]\n",
+    "\n",
+    "for qm in res:\n",
+    "    for gain in range(3):\n",
+    "\n",
+    "        l_data = []\n",
+    "        l_data_old = []\n",
+    "\n",
+    "        data = np.copy(res[qm]['BadPixelsDark'][:,:,:,gain])\n",
+    "        l_data.append(len(data[data>0].flatten()))\n",
+    "        for bit in bits:\n",
+    "            l_data.append(np.count_nonzero(badpix_g[qm][:,:,:,gain] & bit.value))\n",
+    "\n",
+    "        if old_const[qm]['BadPixelsDark'] is not None:\n",
+    "            old_const[qm]['BadPixelsDark'] = old_const[qm]['BadPixelsDark'].astype(np.uint32)\n",
+    "            dataold = np.copy(old_const[qm]['BadPixelsDark'][:, :, :, gain])\n",
+    "            l_data_old.append(len(dataold[dataold>0].flatten()))\n",
     "            for bit in bits:\n",
-    "                l_data.append(np.count_nonzero(badpix_g[cap][qm][:,:,:,gain] & bit.value))\n",
-    "            \n",
-    "            if old_const[cap][qm]['BadPixelsDark'] is not None:\n",
-    "                old_const[cap][qm]['BadPixelsDark'] = old_const[cap][qm]['BadPixelsDark'].astype(np.uint32)\n",
-    "                dataold = np.copy(old_const[cap][qm]['BadPixelsDark'][:, :, :, gain])\n",
-    "                l_data_old.append(len(dataold[dataold>0].flatten()))\n",
-    "                for bit in bits:\n",
-    "                    l_data_old.append(np.count_nonzero(old_const[cap][qm]['BadPixelsDark'][:, :, :, gain] & bit.value))\n",
-    "\n",
-    "            l_data_name = ['All bad pixels', 'NOISE_OUT_OF_THRESHOLD', \n",
-    "                           'OFFSET_OUT_OF_THRESHOLD', 'OFFSET_NOISE_EVAL_ERROR']\n",
-    "            \n",
-    "            l_threshold = ['', f'{thresholds_noise_sigma}', f'{thresholds_offset_sigma}',\n",
-    "                           f'{thresholds_offset_hard}/{thresholds_noise_hard}']\n",
-    "            \n",
-    "            for i in range(len(l_data)):\n",
-    "                line = [f'{l_data_name[i]}, gain {gain_names[gain]}', l_threshold[i], l_data[i]]\n",
-    "            \n",
-    "                if old_const[cap][qm]['BadPixelsDark'] is not None:\n",
-    "                    line += [l_data_old[i]]\n",
-    "                else:\n",
-    "                    line += ['-']\n",
-    "                    \n",
-    "                table.append(line)\n",
-    "            table.append(['', '', '', ''])\n",
+    "                l_data_old.append(np.count_nonzero(old_const[qm]['BadPixelsDark'][:, :, :, gain] & bit.value))\n",
+    "\n",
+    "        l_data_name = ['All bad pixels', 'NOISE_OUT_OF_THRESHOLD', \n",
+    "                       'OFFSET_OUT_OF_THRESHOLD', 'OFFSET_NOISE_EVAL_ERROR']\n",
+    "\n",
+    "        l_threshold = ['', f'{thresholds_noise_sigma}', f'{thresholds_offset_sigma}',\n",
+    "                       f'{thresholds_offset_hard}/{thresholds_noise_hard}']\n",
+    "\n",
+    "        for i in range(len(l_data)):\n",
+    "            line = [f'{l_data_name[i]}, gain {gain_names[gain]}', l_threshold[i], l_data[i]]\n",
+    "\n",
+    "            if old_const[qm]['BadPixelsDark'] is not None:\n",
+    "                line += [l_data_old[i]]\n",
+    "            else:\n",
+    "                line += ['-']\n",
+    "\n",
+    "            table.append(line)\n",
+    "        table.append(['', '', '', ''])\n",
     "\n",
     "display(Markdown('''\n",
     "\n",
@@ -1256,29 +1207,28 @@
     "\n",
     "for const in ['Offset', 'Noise']:\n",
     "    table = [['','High gain', 'High gain', 'Medium gain', 'Medium gain', 'Low gain', 'Low gain']]\n",
-    "    for cap in res:\n",
-    "        for qm in res[cap]:\n",
+    "    for qm in res:\n",
     "\n",
-    "            data = np.copy(res[cap][qm][const])\n",
-    "            data[res[cap][qm]['BadPixelsDark']>0] = np.nan\n",
-    "            \n",
-    "            if old_const[cap][qm][const] is not None and old_const[cap][qm]['BadPixelsDark'] is not None :\n",
-    "                dataold = np.copy(old_const[cap][qm][const])\n",
-    "                dataold[old_const[cap][qm]['BadPixelsDark']>0] = np.nan\n",
-    "\n",
-    "            f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]\n",
-    "            n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']\n",
-    "\n",
-    "            for i, f in enumerate(f_list):\n",
-    "                line = [n_list[i]]\n",
-    "                for gain in range(3):\n",
-    "                    line.append('{:6.1f}'.format(f(data[...,gain])))\n",
-    "                    if old_const[cap][qm][const] is not None and old_const[cap][qm]['BadPixelsDark'] is not None:\n",
-    "                        line.append('{:6.1f}'.format(f(dataold[...,gain])))\n",
-    "                    else:\n",
-    "                        line.append('-')\n",
+    "        data = np.copy(res[qm][const])\n",
+    "        data[res[qm]['BadPixelsDark']>0] = np.nan\n",
+    "\n",
+    "        if old_const[qm][const] is not None and old_const[qm]['BadPixelsDark'] is not None :\n",
+    "            dataold = np.copy(old_const[qm][const])\n",
+    "            dataold[old_const[qm]['BadPixelsDark']>0] = np.nan\n",
+    "\n",
+    "        f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]\n",
+    "        n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']\n",
+    "\n",
+    "        for i, f in enumerate(f_list):\n",
+    "            line = [n_list[i]]\n",
+    "            for gain in range(3):\n",
+    "                line.append('{:6.1f}'.format(f(data[...,gain])))\n",
+    "                if old_const[qm][const] is not None and old_const[qm]['BadPixelsDark'] is not None:\n",
+    "                    line.append('{:6.1f}'.format(f(dataold[...,gain])))\n",
+    "                else:\n",
+    "                    line.append('-')\n",
     "\n",
-    "                table.append(line)\n",
+    "            table.append(line)\n",
     "\n",
     "    display(Markdown('### {} [ADU], good pixels only ###'.format(const)))\n",
     "    md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))  "
@@ -1287,9 +1237,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Offline Cal",
    "language": "python",
-   "name": "python3"
+   "name": "offline-cal"
   },
   "language_info": {
    "codemirror_mode": {
@@ -1301,7 +1251,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.12"
+   "version": "3.8.10"
   }
  },
  "nbformat": 4,
diff --git a/notebooks/LPD/LPD_Correct_Fast.ipynb b/notebooks/LPD/LPD_Correct_Fast.ipynb
index 7a53b167a08368cf056ec748585ccde714ab7f5b..f702ac2741c3d2bea4020e2589613ab00838bc8b 100644
--- a/notebooks/LPD/LPD_Correct_Fast.ipynb
+++ b/notebooks/LPD/LPD_Correct_Fast.ipynb
@@ -33,12 +33,14 @@
     "karabo_id = 'FXE_DET_LPD1M-1'  # Karabo domain for detector.\n",
     "input_source = '{karabo_id}/DET/{module_index}CH0:xtdf'  # Input fast data source.\n",
     "output_source = ''  # Output fast data source, empty to use same as input.\n",
+    "xgm_source = 'SA1_XTD2_XGM/DOOCS/MAIN'\n",
+    "xgm_pulse_count_key = 'pulseEnergy.numberOfSa1BunchesActual'\n",
     "\n",
     "# CalCat parameters\n",
     "creation_time = \"\"  # The timestamp to use with Calibration DB. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
     "cal_db_interface = ''  # Not needed, compatibility with current webservice.\n",
     "cal_db_timeout = 0  # Not needed, compatbility with current webservice.\n",
-    "cal_db_root = '/gpfs/exfel/d/cal/caldb_store'\n",
+    "cal_db_root = '/gpfs/exfel/d/cal/caldb_store'  # The calibration database root path to access constant files. For example accessing constants from the test database.\n",
     "\n",
     "# Operating conditions\n",
     "mem_cells = 512  # Memory cells, LPD constants are always taken with 512 cells.\n",
@@ -46,7 +48,7 @@
     "capacitor = '5pF'  # Capacitor setting: 5pF or 50pF\n",
     "photon_energy = 9.2  # Photon energy in keV.\n",
     "category = 0  # Whom to blame.\n",
-    "use_cell_order = False  # Whether to use memory cell order as a detector condition (not stored for older constants)\n",
+    "use_cell_order = 'auto'  # Whether to use memory cell order as a detector condition; auto/always/never\n",
     "\n",
     "# Correction parameters\n",
     "offset_corr = True  # Offset correction.\n",
@@ -55,6 +57,7 @@
     "gain_amp_map = True  # Gain correction based on GainAmpMap constant.\n",
     "\n",
     "# Output options\n",
+    "ignore_no_frames_no_pulses = False  # Whether to run without SA1 pulses AND frames.\n",
     "overwrite = True  # set to True if existing data should be overwritten\n",
     "chunks_data = 1  # HDF chunk size for pixel data in number of frames.\n",
     "chunks_ids = 32  # HDF chunk size for cellId and pulseId datasets.\n",
@@ -82,12 +85,11 @@
    },
    "outputs": [],
    "source": [
-    "from collections import OrderedDict\n",
+    "from logging import warning\n",
     "from pathlib import Path\n",
     "from time import perf_counter\n",
     "import gc\n",
     "import re\n",
-    "import warnings\n",
     "\n",
     "import numpy as np\n",
     "import h5py\n",
@@ -97,19 +99,21 @@
     "import matplotlib.pyplot as plt\n",
     "%matplotlib inline\n",
     "\n",
-    "from calibration_client import CalibrationClient\n",
-    "from calibration_client.modules import CalibrationConstantVersion\n",
     "import extra_data as xd\n",
     "import extra_geom as xg\n",
     "import pasha as psh\n",
-    "\n",
     "from extra_data.components import LPD1M\n",
     "\n",
+    "import cal_tools.restful_config as rest_cfg\n",
+    "from cal_tools.calcat_interface import CalCatError, LPD_CalibrationData\n",
     "from cal_tools.lpdalgs import correct_lpd_frames\n",
-    "from cal_tools.lpdlib import get_mem_cell_order\n",
-    "from cal_tools.tools import CalibrationMetadata, calcat_creation_time\n",
-    "from cal_tools.files import DataFile\n",
-    "from cal_tools.restful_config import restful_config"
+    "from cal_tools.lpdlib import get_mem_cell_pattern, make_cell_order_condition\n",
+    "from cal_tools.tools import (\n",
+    "    CalibrationMetadata,\n",
+    "    calcat_creation_time,\n",
+    "    write_constants_fragment,\n",
+    ")\n",
+    "from cal_tools.files import DataFile"
    ]
   },
   {
@@ -133,19 +137,16 @@
     "\n",
     "output_source = output_source or input_source\n",
     "\n",
-    "cal_db_root = Path(cal_db_root)\n",
-    "\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "\n",
     "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
     "print(f'Using {creation_time.isoformat()} as creation time')\n",
     "\n",
     "# Pick all modules/aggregators or those selected.\n",
-    "if not karabo_da or karabo_da == ['']:\n",
-    "    if not modules or modules == [-1]:\n",
+    "if karabo_da == ['']:\n",
+    "    if modules == [-1]:\n",
     "        modules = list(range(16))\n",
-    "\n",
     "    karabo_da = [f'LPD{i:02d}' for i in modules]\n",
+    "else:\n",
+    "    modules = [int(x[-2:]) for x in karabo_da]\n",
     "    \n",
     "# Pick all sequences or those selected.\n",
     "if not sequences or sequences == [-1]:\n",
@@ -154,7 +155,10 @@
     "    do_sequence = [int(x) for x in sequences].__contains__    \n",
     "    \n",
     "# List of detector sources.\n",
-    "det_inp_sources = [input_source.format(karabo_id=karabo_id, module_index=int(da[-2:])) for da in karabo_da]"
+    "det_inp_sources = [input_source.format(karabo_id=karabo_id, module_index=int(da[-2:])) for da in karabo_da]\n",
+    "\n",
+    "if use_cell_order not in {'auto', 'always', 'never'}:\n",
+    "    raise ValueError(\"use_cell_order must be auto/always/never\")"
    ]
   },
   {
@@ -185,7 +189,42 @@
     "\n",
     "print('Files to process:')\n",
     "for data_descr in sorted(data_to_process, key=lambda x: f'{x[0]}{x[1]}'):\n",
-    "    print(f'{data_descr[0]}\\t{data_descr[1]}')"
+    "    print(f'{data_descr[0]}\\t{data_descr[1]}')\n",
+    "    \n",
+    "# Collect the train ID contained in the input LPD files.\n",
+    "inp_lpd_dc = xd.DataCollection.from_paths([x[1] for x in data_to_process])\n",
+    "\n",
+    "frame_count = sum([\n",
+    "    int(inp_lpd_dc[source, 'image.data'].data_counts(labelled=False).sum())\n",
+    "    for source in inp_lpd_dc.all_sources], 0)\n",
+    "\n",
+    "if frame_count == 0:\n",
+    "    inp_dc = xd.RunDirectory(run_folder) \\\n",
+    "        .select_trains(xd.by_id[inp_lpd_dc.train_ids])\n",
+    "    \n",
+    "    try:\n",
+    "        pulse_count = int(inp_dc[xgm_source, xgm_pulse_count_key].ndarray().sum())\n",
+    "    except xd.SourceNameError:\n",
+    "        warning(f'Missing XGM source `{xgm_source}`')\n",
+    "        pulse_count = None\n",
+    "    except xd.PropertyNameError:\n",
+    "        warning(f'Missing XGM pulse count key `{xgm_pulse_count_key}`')\n",
+    "        pulse_count = None\n",
+    "    \n",
+    "    if pulse_count == 0 and not ignore_no_frames_no_pulses:\n",
+    "        warning(f'Affected files contain neither LPD frames nor SA1 pulses '\n",
+    "                f'according to {xgm_source}, processing is skipped. If this '\n",
+    "                f'incorrect, please contact da-support@xfel.eu')\n",
+    "        from sys import exit\n",
+    "        exit(0)\n",
+    "    elif pulse_count is None:\n",
+    "        raise ValueError('Affected files contain no LPD frames and SA1 pulses '\n",
+    "                         'could not be inferred from XGM data')\n",
+    "    else:\n",
+    "        raise ValueError('Affected files contain no LPD frames but SA1 pulses')\n",
+    "        \n",
+    "else:\n",
+    "    print(f'Total number of LPD pulses across all modules: {frame_count}')"
    ]
   },
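Review note: the frame/pulse sanity check above builds on two extra_data accessors — `data_counts()` for frames actually recorded per train, and `ndarray()` for the XGM pulse counts. A sketch against a hypothetical run directory (the path, proposal, and run are placeholders):

```python
import extra_data as xd

# Placeholder run directory; substitute a real proposal/run.
run = xd.RunDirectory('/gpfs/exfel/exp/FXE/202201/p003073/raw/r0010')

# Frames actually recorded per train for one LPD module.
counts = run['FXE_DET_LPD1M-1/DET/0CH0:xtdf', 'image.data'].data_counts(labelled=False)
print('Total frames:', int(counts.sum()))

# SA1 pulse count per train from the XGM, used as a cross-check above.
pulses = run['SA1_XTD2_XGM/DOOCS/MAIN', 'pulseEnergy.numberOfSa1BunchesActual'].ndarray()
print('Total pulses:', int(pulses.sum()))
```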
   {
@@ -198,32 +237,46 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Connect to CalCat.\n",
-    "calcat_config = restful_config['calcat']\n",
-    "client = CalibrationClient(\n",
-    "    base_api_url=calcat_config['base-api-url'],\n",
-    "    use_oauth2=calcat_config['use-oauth2'],\n",
-    "    client_id=calcat_config['user-id'],\n",
-    "    client_secret=calcat_config['user-secret'],\n",
-    "    user_email=calcat_config['user-email'],\n",
-    "    token_url=calcat_config['token-url'],\n",
-    "    refresh_url=calcat_config['refresh-url'],\n",
-    "    auth_url=calcat_config['auth-url'],\n",
-    "    scope='')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "tags": []
+   },
    "outputs": [],
    "source": [
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# Constant paths & timestamps are saved under retrieved-constants in calibration_metadata.yml\n",
-    "const_yaml = metadata.setdefault(\"retrieved-constants\", {})"
+    "start = perf_counter()\n",
+    "\n",
+    "cell_ids_pattern_s = None\n",
+    "if use_cell_order != 'never':\n",
+    "    # Read the order of memory cells used\n",
+    "    raw_data = xd.DataCollection.from_paths([e[1] for e in data_to_process])\n",
+    "    cell_ids_pattern_s = make_cell_order_condition(\n",
+    "        use_cell_order, get_mem_cell_pattern(raw_data, det_inp_sources)\n",
+    "    )\n",
+    "print(\"Memory cells order:\", cell_ids_pattern_s)\n",
+    "\n",
+    "lpd_cal = LPD_CalibrationData(\n",
+    "    detector_name=karabo_id,\n",
+    "    modules=karabo_da,\n",
+    "    sensor_bias_voltage=bias_voltage,\n",
+    "    memory_cells=mem_cells,\n",
+    "    feedback_capacitor=capacitor,\n",
+    "    source_energy=photon_energy,\n",
+    "    memory_cell_order=cell_ids_pattern_s,\n",
+    "    category=category,\n",
+    "    event_at=creation_time,\n",
+    "    client=rest_cfg.calibration_client(),\n",
+    "    caldb_root=Path(cal_db_root),\n",
+    ")\n",
+    "\n",
+    "lpd_metadata = lpd_cal.metadata([\"Offset\", \"BadPixelsDark\"])\n",
+    "try:\n",
+    "    illum_metadata = lpd_cal.metadata(lpd_cal.illuminated_calibrations)\n",
+    "    for key, value in illum_metadata.items():\n",
+    "        lpd_metadata.setdefault(key, {}).update(value)\n",
+    "except CalCatError as e:  # TODO: replace when API errors are improved.\n",
+    "    warning(f\"CalCatError: {e}\")\n",
+    "\n",
+    "total_time = perf_counter() - start\n",
+    "print(f'Looking up constants {total_time:.1f}s')"
    ]
   },
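Review note: merging the illuminated-constant metadata into `lpd_metadata` relies on the `setdefault(...).update(...)` idiom, which adds new modules and folds extra constants into existing ones without clobbering them. A toy illustration with placeholder CCV entries:

```python
# Placeholder CCV entries standing in for the real metadata dicts.
lpd_metadata = {'LPD00': {'Offset': 'ccv-a', 'BadPixelsDark': 'ccv-b'}}
illum_metadata = {'LPD00': {'RelativeGain': 'ccv-c'}, 'LPD01': {'FFMap': 'ccv-d'}}

for mod, consts in illum_metadata.items():
    lpd_metadata.setdefault(mod, {}).update(consts)

# LPD00 keeps its dark constants and gains RelativeGain; LPD01 is added fresh.
print(lpd_metadata)
```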
   {
@@ -232,93 +285,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "const_data = {}\n",
-    "const_load_mp = psh.ProcessContext(num_workers=24)\n",
-    "\n",
-    "if const_yaml:  # Read constants from YAML file.\n",
-    "    start = perf_counter()\n",
-    "    for da, ccvs in const_yaml.items():\n",
-    "\n",
-    "        for calibration_name, ccv in ccvs['constants'].items():\n",
-    "            if ccv['file-path'] is None:\n",
-    "                warnings.warn(f\"Missing {calibration_name} for {da}\")\n",
-    "                continue\n",
-    "\n",
-    "            dtype = np.uint32 if calibration_name.startswith('BadPixels') else np.float32\n",
-    "\n",
-    "            const_data[(da, calibration_name)] = dict(\n",
-    "                path=Path(ccv['file-path']),\n",
-    "                dataset=ccv['dataset-name'],\n",
-    "                data=const_load_mp.alloc(shape=(256, 256, mem_cells, 3), dtype=dtype)\n",
-    "            )\n",
-    "else:  # Retrieve constants from CALCAT.\n",
-    "    dark_calibrations = {\n",
-    "        1: 'Offset',  # np.float32\n",
-    "        14: 'BadPixelsDark'  # should be np.uint32, but is np.float64\n",
-    "    }\n",
-    "\n",
-    "    base_condition = [\n",
-    "        dict(parameter_id=1, value=bias_voltage),  # Sensor bias voltage\n",
-    "        dict(parameter_id=7, value=mem_cells),  # Memory cells\n",
-    "        dict(parameter_id=15, value=capacitor),  # Feedback capacitor\n",
-    "        dict(parameter_id=13, value=256),  # Pixels X\n",
-    "        dict(parameter_id=14, value=256),  # Pixels Y\n",
-    "    ]\n",
-    "    if use_cell_order:\n",
-    "        # Read the order of memory cells used\n",
-    "        raw_data = xd.DataCollection.from_paths([e[1] for e in data_to_process])\n",
-    "        cell_ids_pattern_s = get_mem_cell_order(raw_data, det_inp_sources)\n",
-    "        print(\"Memory cells order:\", cell_ids_pattern_s)\n",
-    "\n",
-    "        dark_condition = base_condition + [\n",
-    "            dict(parameter_id=30, value=cell_ids_pattern_s),  # Memory cell order\n",
-    "        ]\n",
-    "    else:\n",
-    "        dark_condition = base_condition.copy()\n",
-    "\n",
-    "    illuminated_calibrations = {\n",
-    "        20: 'BadPixelsFF',  # np.uint32\n",
-    "        42: 'GainAmpMap',  # np.float32\n",
-    "        43: 'FFMap',  # np.float32\n",
-    "        44: 'RelativeGain'  # np.float32\n",
-    "    }\n",
-    "\n",
-    "    illuminated_condition = base_condition + [\n",
-    "        dict(parameter_id=3, value=photon_energy),  # Source energy\n",
-    "        dict(parameter_id=25, value=category)  # category\n",
-    "    ]\n",
-    "\n",
-    "    print('Querying calibration database', end='', flush=True)\n",
-    "    start = perf_counter()\n",
-    "    for calibrations, condition in [\n",
-    "        (dark_calibrations, dark_condition),\n",
-    "        (illuminated_calibrations, illuminated_condition)\n",
-    "    ]:\n",
-    "        resp = CalibrationConstantVersion.get_closest_by_time_by_detector_conditions(\n",
-    "            client, karabo_id, list(calibrations.keys()),\n",
-    "            {'parameters_conditions_attributes': condition},\n",
-    "            karabo_da='', event_at=creation_time.isoformat()\n",
-    "        )\n",
-    "\n",
-    "        if not resp['success']:\n",
-    "            raise RuntimeError(resp)\n",
-    "\n",
-    "        for ccv in resp['data']:\n",
-    "            cc = ccv['calibration_constant']\n",
-    "            da = ccv['physical_detector_unit']['karabo_da']\n",
-    "            calibration_name = calibrations[cc['calibration_id']]\n",
-    "            \n",
-    "            dtype = np.uint32 if calibration_name.startswith('BadPixels') else np.float32\n",
-    "            \n",
-    "            const_data[(da, calibration_name)] = dict(\n",
-    "                path=Path(ccv['path_to_file']) / ccv['file_name'],\n",
-    "                dataset=ccv['data_set_name'],\n",
-    "                data=const_load_mp.alloc(shape=(256, 256, mem_cells, 3), dtype=dtype)\n",
-    "            )\n",
-    "        print('.', end='', flush=True)\n",
-    "            \n",
-    "total_time = perf_counter() - start\n",
-    "print(f'{total_time:.1f}s')"
+    "# Validate the constants availability and raise/warn accordingly.\n",
+    "for mod, calibrations in lpd_metadata.items():\n",
+    "    missing_offset = {\"Offset\"} - set(calibrations)\n",
+    "    warn_missing_constants = {\n",
+    "        \"BadPixelsDark\", \"BadPixelsFF\", \"GainAmpMap\",\n",
+    "        \"FFMap\", \"RelativeGain\"} - set(calibrations)\n",
+    "    if missing_offset:\n",
+    "        warning(f\"Offset constant is not available to correct {mod}.\")\n",
+    "        karabo_da.remove(mod)\n",
+    "    if warn_missing_constants:\n",
+    "        warning(f\"Constants {warn_missing_constants} were not retrieved for {mod}.\")\n",
+    "if not karabo_da:  # Offsets are missing for all modules.\n",
+    "    raise Exception(\"Could not find offset constants for any modules, will not correct data.\")\n",
+    "\n",
+    "# Remove skipped correction modules from data_to_process\n",
+    "data_to_process = [(mod, in_f, out_f) for mod, in_f, out_f in data_to_process if mod in karabo_da]"
    ]
   },
   {
@@ -327,20 +309,15 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def load_constant_dataset(wid, index, const_descr):\n",
-    "    ccv_entry = const_data[const_descr]\n",
-    "    \n",
-    "    with h5py.File(cal_db_root / ccv_entry['path'], 'r') as fp:\n",
-    "        fp[ccv_entry['dataset'] + '/data'].read_direct(ccv_entry['data'])\n",
-    "        \n",
-    "    print('.', end='', flush=True)\n",
-    "\n",
-    "print('Loading calibration data', end='', flush=True)\n",
-    "start = perf_counter()\n",
-    "const_load_mp.map(load_constant_dataset, list(const_data.keys()))\n",
-    "total_time = perf_counter() - start\n",
-    "\n",
-    "print(f'{total_time:.1f}s')"
+    "# write constants metadata to fragment YAML\n",
+    "write_constants_fragment(\n",
+    "    out_folder=(metadata_folder or out_folder),\n",
+    "    det_metadata=lpd_metadata,\n",
+    "    caldb_root=lpd_cal.caldb_root,\n",
+    ")\n",
+    "\n",
+    "# Load constants data for all constants\n",
+    "const_data = lpd_cal.ndarray_map(metadata=lpd_metadata)"
    ]
   },
   {
@@ -359,19 +336,21 @@
     "constant_order = {\n",
     "    'Offset':        (2, 1, 0, 3),\n",
     "    'BadPixelsDark': (2, 1, 0, 3),\n",
-    "    'RelativeGain':  (2, 1, 0, 3),\n",
+    "    'RelativeGain':  (2, 0, 1, 3),\n",
     "    'FFMap':         (2, 0, 1, 3),\n",
     "    'BadPixelsFF':   (2, 0, 1, 3),\n",
     "    'GainAmpMap':    (2, 0, 1, 3),\n",
     "}\n",
     "\n",
     "def prepare_constants(wid, index, aggregator):\n",
-    "    consts = {calibration_name: entry['data']\n",
-    "              for (aggregator_, calibration_name), entry\n",
-    "              in const_data.items()\n",
-    "              if aggregator == aggregator_}\n",
-    "    \n",
+    "    consts = const_data.get(aggregator, {})\n",
     "    def _prepare_data(calibration_name, dtype):\n",
+    "        # Some old BadPixels constants have <f8 dtype.\n",
+    "        # Convert nan to float 0 to avoid having 2147483648 after\n",
+    "        # converting float64 to uint32.\n",
+    "        if \"BadPixels\" in calibration_name and consts[calibration_name].dtype != np.uint32:\n",
+    "            consts[calibration_name] = np.nan_to_num(\n",
+    "                consts[calibration_name], nan=0.0)\n",
     "        return consts[calibration_name] \\\n",
     "            .transpose(constant_order[calibration_name]) \\\n",
     "            .astype(dtype, copy=True)  # Make sure array is contiguous.\n",
@@ -512,6 +491,12 @@
     "    [(11.4, 299), (-11.5, 8), (254.5, -16), (278.5, 275)])\n",
     "\n",
     "output_paths = [outp_path for _, _, outp_path in data_to_process if outp_path.exists()]\n",
+    "\n",
+    "if not output_paths:\n",
+    "    warning('Data preview is skipped as there are no existing output paths')\n",
+    "    from sys import exit\n",
+    "    exit(0)\n",
+    "\n",
     "dc = xd.DataCollection.from_paths(output_paths).select_trains(np.s_[0])\n",
     "\n",
     "det = LPD1M(dc, detector_name=karabo_id)\n",
diff --git a/notebooks/LPD/LPD_Correct_and_Verify.ipynb b/notebooks/LPD/LPD_Correct_and_Verify.ipynb
index c34b835b67d1306830450d733fa7156faa9998b3..f99dfd20c38fecba36b53e6eef42aee3d7545c1e 100644
--- a/notebooks/LPD/LPD_Correct_and_Verify.ipynb
+++ b/notebooks/LPD/LPD_Correct_and_Verify.ipynb
@@ -35,7 +35,7 @@
     "h5path_idx = '/INDEX/{}/DET/{}:xtdf/' # path in the HDF5 file to images\n",
     "\n",
     "use_dir_creation_date = True # use the creation date of the directory for database time derivation\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8020\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8020\" # the database interface to use\n",
     "cal_db_timeout = 30000 # timeout for calibration db requests in milliseconds\n",
     "\n",
     "\n",
diff --git a/notebooks/LPD/LPD_retrieve_constants_precorrection.ipynb b/notebooks/LPD/LPD_retrieve_constants_precorrection.ipynb
deleted file mode 100644
index 31b4eafa961e8686af5a70caad9956f0254d05b8..0000000000000000000000000000000000000000
--- a/notebooks/LPD/LPD_retrieve_constants_precorrection.ipynb
+++ /dev/null
@@ -1,237 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# LPD Retrieving Constants Pre-correction #\n",
-    "\n",
-    "Author: European XFEL Detector Group, Version: 1.0\n",
-    "\n",
-    "The following notebook provides a constants metadata in a YAML file to use while correcting LPD images."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Input parameters\n",
-    "in_folder = \"/gpfs/exfel/exp/FXE/202201/p003073/raw/\"  # the folder to read data from, required\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/remove/LPD_test\"  # the folder to output to, required\n",
-    "metadata_folder = ''  # Directory containing calibration_metadata.yml when run by xfel-calibrate.\n",
-    "modules = [-1]  # Modules indices to correct, use [-1] for all, only used when karabo_da is empty\n",
-    "karabo_da = ['']  # Data aggregators names to correct, use [''] for all\n",
-    "run = 10  # run to process, required\n",
-    "\n",
-    "# Source parameters\n",
-    "karabo_id = 'FXE_DET_LPD1M-1'  # Karabo domain for detector.\n",
-    "input_source = '{karabo_id}/DET/{module_index}CH0:xtdf'  # Input fast data source.\n",
-    "\n",
-    "# CalCat parameters\n",
-    "creation_time = \"\"  # The timestamp to use with Calibration DB. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
-    "\n",
-    "# Operating conditions\n",
-    "mem_cells = 512  # Memory cells, LPD constants are always taken with 512 cells.\n",
-    "bias_voltage = 250.0  # Detector bias voltage.\n",
-    "capacitor = '5pF'  # Capacitor setting: 5pF or 50pF\n",
-    "photon_energy = 9.2  # Photon energy in keV.\n",
-    "category = 0  # Whom to blame.\n",
-    "use_cell_order = False  # Whether to use memory cell order as a detector condition (not stored for older constants)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from pathlib import Path\n",
-    "from time import perf_counter\n",
-    "\n",
-    "import numpy as np\n",
-    "\n",
-    "from calibration_client import CalibrationClient\n",
-    "from calibration_client.modules import CalibrationConstantVersion\n",
-    "import extra_data as xd\n",
-    "\n",
-    "from cal_tools.lpdlib import get_mem_cell_order\n",
-    "from cal_tools.tools import (\n",
-    "    CalibrationMetadata,\n",
-    "    calcat_creation_time,\n",
-    "    save_constant_metadata,\n",
-    ")\n",
-    "from cal_tools.restful_config import restful_config"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "out_folder = Path(out_folder)\n",
-    "out_folder.mkdir(exist_ok=True)\n",
-    "\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# Constant paths & timestamps are saved under retrieved-constants in calibration_metadata.yml\n",
-    "retrieved_constants = metadata.setdefault(\"retrieved-constants\", {})\n",
-    "\n",
-    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
-    "print(f'Using {creation_time.isoformat()} as creation time')\n",
-    "\n",
-    "# Pick all modules/aggregators or those selected.\n",
-    "if not karabo_da or karabo_da == ['']:\n",
-    "    if not modules or modules == [-1]:\n",
-    "        modules = list(range(16))\n",
-    "\n",
-    "    karabo_da = [f'LPD{i:02d}' for i in modules]\n",
-    "\n",
-    "# List of detector sources.\n",
-    "det_inp_sources = [input_source.format(karabo_id=karabo_id, module_index=int(da[-2:])) for da in karabo_da]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Connect to CalCat.\n",
-    "calcat_config = restful_config['calcat']\n",
-    "client = CalibrationClient(\n",
-    "    base_api_url=calcat_config['base-api-url'],\n",
-    "    use_oauth2=calcat_config['use-oauth2'],\n",
-    "    client_id=calcat_config['user-id'],\n",
-    "    client_secret=calcat_config['user-secret'],\n",
-    "    user_email=calcat_config['user-email'],\n",
-    "    token_url=calcat_config['token-url'],\n",
-    "    refresh_url=calcat_config['refresh-url'],\n",
-    "    auth_url=calcat_config['auth-url'],\n",
-    "    scope='')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "dark_calibrations = {\n",
-    "    1: 'Offset',\n",
-    "    14: 'BadPixelsDark',\n",
-    "}\n",
-    "\n",
-    "base_condition = [\n",
-    "    dict(parameter_id=1, value=bias_voltage),  # Sensor bias voltage\n",
-    "    dict(parameter_id=7, value=mem_cells),  # Memory cells\n",
-    "    dict(parameter_id=15, value=capacitor),  # Feedback capacitor\n",
-    "    dict(parameter_id=13, value=256),  # Pixels X\n",
-    "    dict(parameter_id=14, value=256),  # Pixels Y\n",
-    "]\n",
-    "if use_cell_order:\n",
-    "    # Read the order of memory cells used\n",
-    "    raw_data = xd.RunDirectory(Path(in_folder, f'r{run:04d}'))\n",
-    "    cell_ids_pattern_s = get_mem_cell_order(raw_data, det_inp_sources)\n",
-    "    print(\"Memory cells order:\", cell_ids_pattern_s)\n",
-    "\n",
-    "    dark_condition = base_condition + [\n",
-    "        dict(parameter_id=30, value=cell_ids_pattern_s),  # Memory cell order\n",
-    "    ]\n",
-    "else:\n",
-    "    dark_condition = base_condition.copy()\n",
-    "\n",
-    "illuminated_calibrations = {\n",
-    "    20: 'BadPixelsFF',\n",
-    "    42: 'GainAmpMap',\n",
-    "    43: 'FFMap',\n",
-    "    44: 'RelativeGain',\n",
-    "}\n",
-    "\n",
-    "illuminated_condition = base_condition + [\n",
-    "    dict(parameter_id=3, value=photon_energy),  # Source energy\n",
-    "    dict(parameter_id=25, value=category)  # category\n",
-    "]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "const_data = {}\n",
-    "\n",
-    "print('Querying calibration database', end='', flush=True)\n",
-    "start = perf_counter()\n",
-    "for k_da in karabo_da:\n",
-    "    pdu = None\n",
-    "    retrieved_constants[k_da] = dict()\n",
-    "    const_mdata = retrieved_constants[k_da][\"constants\"] = dict()\n",
-    "    for calibrations, condition in [\n",
-    "        (dark_calibrations, dark_condition),\n",
-    "        (illuminated_calibrations, illuminated_condition)\n",
-    "    ]:\n",
-    "        resp = CalibrationConstantVersion.get_closest_by_time_by_detector_conditions(\n",
-    "            client, karabo_id, list(calibrations.keys()),\n",
-    "            {'parameters_conditions_attributes': condition},\n",
-    "            karabo_da=k_da, event_at=creation_time.isoformat())\n",
-    "\n",
-    "        if not resp[\"success\"]:\n",
-    "            print(f\"ERROR: Constants {list(calibrations.values())} \"\n",
-    "            f\"were not retrieved, {resp['app_info']}\")\n",
-    "            for cname in calibrations.values():\n",
-    "                const_mdata[cname] = dict()\n",
-    "                const_mdata[cname][\"file-path\"] = None\n",
-    "                const_mdata[cname][\"dataset-name\"] = None\n",
-    "                const_mdata[cname][\"creation-time\"] = None     \n",
-    "            continue\n",
-    "\n",
-    "        for ccv in resp[\"data\"]:\n",
-    "            cc = ccv['calibration_constant']\n",
-    "            cname = calibrations[cc['calibration_id']]\n",
-    "            const_mdata[cname] = dict()\n",
-    "            const_mdata[cname][\"file-path\"] = str(Path(ccv['path_to_file']) / ccv['file_name'])\n",
-    "            const_mdata[cname][\"dataset-name\"] = ccv['data_set_name']\n",
-    "            const_mdata[cname][\"creation-time\"] = ccv['begin_at']\n",
-    "            pdu = ccv['physical_detector_unit']['physical_name']\n",
-    "\n",
-    "        print('.', end='', flush=True)\n",
-    "    retrieved_constants[k_da][\"physical-detector-unit\"] = pdu\n",
-    "metadata.save()\n",
-    "\n",
-    "total_time = perf_counter() - start\n",
-    "print(f'{total_time:.1f}s')\n",
-    "print(f\"Stored retrieved constants in {metadata.filename}\")"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.8.11 ('.cal4_venv')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.11"
-  },
-  "orig_nbformat": 4,
-  "vscode": {
-   "interpreter": {
-    "hash": "ccde353e8822f411c1c49844e1cbe3edf63293a69efd975d1b44f5e852832668"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/LPD/playground/Investigate_Single_Photons.ipynb b/notebooks/LPD/playground/Investigate_Single_Photons.ipynb
index f6151dbc6cde414490116c35342b43e4cb9d83bd..14fa239a7cf690bfebb44c11b8b8a634bf5076ca 100644
--- a/notebooks/LPD/playground/Investigate_Single_Photons.ipynb
+++ b/notebooks/LPD/playground/Investigate_Single_Photons.ipynb
@@ -208,7 +208,7 @@
      "traceback": [
       "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
       "\u001b[0;31mAgain\u001b[0m                                     Traceback (most recent call last)",
-      "\u001b[0;32m<ipython-input-144-cccdcd794c38>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     11\u001b[0m                                                 capacitor=5),\n\u001b[1;32m     12\u001b[0m                                             \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m256\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m256\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m512\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 13\u001b[0;31m                                             \"tcp://max-exfl016:8021\", timeout=30000000)\n\u001b[0m\u001b[1;32m     14\u001b[0m     \u001b[0mrel_gains\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mslopesCI\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m...\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;32m<ipython-input-144-cccdcd794c38>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     11\u001b[0m                                                 capacitor=5),\n\u001b[1;32m     12\u001b[0m                                             \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m256\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m256\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m512\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 13\u001b[0;31m                                             \"tcp://max-exfl-cal001:8021\", timeout=30000000)\n\u001b[0m\u001b[1;32m     14\u001b[0m     \u001b[0mrel_gains\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mslopesCI\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m...\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
       "\u001b[0;32m/home/haufs/pycalibrate_tmp/cal_tools/cal_tools/tools.py\u001b[0m in \u001b[0;36mget_constant_from_db\u001b[0;34m(device, constant, condition, empty_constant, cal_db_interface, creation_time, print_once, timeout)\u001b[0m\n\u001b[1;32m    305\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mcreation_time\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    306\u001b[0m             \u001b[0mmetadata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcalibration_constant_version\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mVersions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 307\u001b[0;31m             \u001b[0mmetadata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mretrieve\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcal_db_interface\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    308\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    309\u001b[0m             metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
       "\u001b[0;32m/home/haufs/calkarabo/karabo/extern/lib/python3.4/site-packages/iCalibrationDB/meta_data.py\u001b[0m in \u001b[0;36mretrieve\u001b[0;34m(self, receiver, when, silent, timeout)\u001b[0m\n\u001b[1;32m    264\u001b[0m             \u001b[0msock\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconnect\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreceiver\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    265\u001b[0m             \u001b[0msock\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msend_pyobj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 266\u001b[0;31m             \u001b[0mresp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msock\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv_pyobj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    267\u001b[0m         \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    268\u001b[0m             \u001b[0;32mdel\u001b[0m \u001b[0msock\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
       "\u001b[0;32m/home/haufs/calkarabo/karabo/extern/lib/python3.4/site-packages/zmq/sugar/socket.py\u001b[0m in \u001b[0;36mrecv_pyobj\u001b[0;34m(self, flags)\u001b[0m\n\u001b[1;32m    489\u001b[0m             \u001b[0mThe\u001b[0m \u001b[0mPython\u001b[0m \u001b[0mobject\u001b[0m \u001b[0mthat\u001b[0m \u001b[0marrives\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0ma\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    490\u001b[0m         \"\"\"\n\u001b[0;32m--> 491\u001b[0;31m         \u001b[0mmsg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mflags\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    492\u001b[0m         \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_deserialize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpickle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    493\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
@@ -234,7 +234,7 @@
     "                                                bias_voltage=250,\n",
     "                                                capacitor=5),\n",
     "                                            np.ones((256, 256, 512, 2)),\n",
-    "                                            \"tcp://max-exfl016:8021\", timeout=30000000)\n",
+    "                                            \"tcp://max-exfl-cal001:8021\", timeout=30000000)\n",
     "    rel_gains = slopesCI[..., 0]\n",
     "    \n",
     "    flat_fields = np.squeeze(\n",
@@ -248,7 +248,7 @@
     "                                                        beam_energy=None,\n",
     "                                                        capacitor=5),  # noqa\n",
     "                             np.ones((256, 256)),\n",
-    "                             \"tcp://max-exfl016:8021\", timeout=30000000))\n",
+    "                             \"tcp://max-exfl-cal001:8021\", timeout=30000000))\n",
     "    \n",
     "    for tile in tiles:\n",
     "        tx0, ty0 = 256-(tile//8+1)* tile_y, 256 - (tile%8+1) * tile_x\n",
diff --git a/notebooks/LPDMini/LPD_Mini_Char_Darks_NBC.ipynb b/notebooks/LPDMini/LPD_Mini_Char_Darks_NBC.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..a875f52a4c458590bbaae4c4fe3d08035628eab1
--- /dev/null
+++ b/notebooks/LPDMini/LPD_Mini_Char_Darks_NBC.ipynb
@@ -0,0 +1,1188 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# LPD Mini Offset, Noise and Dead Pixels Characterization #\n",
+    "\n",
+    "Author: M. Karnevskiy, S. Hauf\n",
+    "\n",
+    "This notebook performs re-characterize of dark images to derive offset, noise and bad-pixel maps. All three types of constants are evaluated per-pixel and per-memory cell.\n",
+    "\n",
+    "The notebook will correctly handle veto settings, but note that if you veto cells you will not be able to use these offsets for runs with different veto settings - vetoed cells will have zero offset.\n",
+    "\n",
+    "The evaluated calibration constants are stored locally and injected in the calibration data base.\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "in_folder = \"/gpfs/exfel/exp/FXE/202330/p900319/raw/\" # path to input data, required\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/kluyvert/darks-lpdmini\" # path to output to, required\n",
+    "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
+    "run_high = 3 # run number in which high gain data was recorded, required\n",
+    "run_med = 4 # run number in which medium gain data was recorded, required\n",
+    "run_low = 5 # run number in which low gain data was recorded, required\n",
+    "\n",
+    "karabo_id = \"FXE_DET_LPD_MINI\" # karabo karabo_id\n",
+    "karabo_da = ['']  # a list of data aggregators names with module number, e.g. 'LPDMINI00/2', Default [''] for selecting all data aggregators\n",
+    "source_name = \"{}/DET/0CH0:xtdf\"  # Source name for raw detector data\n",
+    "control_source_name = \"{}/FPGA/FEM\"  # Source name for control device\n",
+    "\n",
+    "creation_time = \"\"  # Override the timestamp taken from the run (format '2023-03-30 15:04:31')\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8025\" # the database interface to use\n",
+    "cal_db_timeout = 300000 # timeout on caldb requests\n",
+    "local_output = True # output constants locally\n",
+    "db_output = False # output constants to database\n",
+    "\n",
+    "capacitor_setting = 5 # capacitor_setting for which data was taken \n",
+    "bias_voltage_0 = -1 # bias voltage for minis 1, 3, 5, 7; Setting -1 will read the value from files\n",
+    "bias_voltage_1 = -1 # bias voltage for minis 2, 4, 6, 8; Setting -1 will read the value from files\n",
+    "thresholds_offset_sigma = 3. # bad pixel relative threshold in terms of n sigma offset\n",
+    "thresholds_offset_hard = [400, 1500] # bad pixel hard threshold\n",
+    "thresholds_noise_sigma = 7. # bad pixel relative threshold in terms of n sigma noise\n",
+    "thresholds_noise_hard = [1, 35] # bad pixel hard threshold\n",
+    "skip_first_ntrains = 10 # Number of first trains to skip\n",
+    "\n",
+    "ntrains = 500 # number of trains to use\n",
+    "min_trains = 370  # minimum number of trains needed for each gain stage\n",
+    "high_res_badpix_3d = False # plot bad-pixel summary in high resolution\n",
+    "test_for_normality = False # permorm normality test\n",
+    "inject_cell_order = 'auto'  # Include memory cell order as part of the detector condition (auto = only if cells wrap around)\n",
+    "operation_mode = \"\"  # This in given as a default input argument through the webservice in production. Don't remove!"
+   ]
+  },
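+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a rough, self-contained illustration of the reduction performed later in this notebook (toy shapes and thresholds, not the production pipeline): per memory cell, the offset is the median over dark frames, the noise is the standard deviation, and pixels far outside the distribution are flagged as bad."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Minimal sketch on synthetic data; shapes and thresholds are illustrative only.\n",
+    "import numpy as np\n",
+    "\n",
+    "rng = np.random.default_rng(0)\n",
+    "frames = rng.normal(1000, 5, size=(100, 32, 256))  # (trains, slow_scan, fast_scan)\n",
+    "\n",
+    "toy_offset = np.median(frames, axis=0)  # per-pixel pedestal\n",
+    "toy_noise = np.std(frames, axis=0)  # per-pixel noise\n",
+    "\n",
+    "mn, std = np.nanmedian(toy_offset), np.nanstd(toy_offset)\n",
+    "toy_bad = (toy_offset < mn - 3 * std) | (toy_offset > mn + 3 * std)  # sigma thresholding\n",
+    "print(f'offset ~{toy_offset.mean():.0f} ADU, noise ~{toy_noise.mean():.1f} ADU, {toy_bad.sum()} pixels flagged')"
+   ]
+  },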
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import multiprocessing\n",
+    "import os\n",
+    "import warnings\n",
+    "\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "import matplotlib\n",
+    "import pasha as psh\n",
+    "import scipy.stats\n",
+    "from IPython.display import Latex, Markdown, display\n",
+    "\n",
+    "matplotlib.use(\"agg\")\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "%matplotlib inline\n",
+    "import numpy as np\n",
+    "import tabulate\n",
+    "import yaml\n",
+    "from extra_data import RunDirectory\n",
+    "from iCalibrationDB import Conditions, Constants\n",
+    "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
+    "from XFELDetAna.plotting.simpleplot import simplePlot\n",
+    "\n",
+    "from cal_tools.calcat_interface import CalCatApi\n",
+    "from cal_tools.enums import BadPixels\n",
+    "from cal_tools.lpdlib import make_cell_order_condition\n",
+    "from cal_tools.plotting import plot_badpix_3d\n",
+    "from cal_tools.restful_config import calibration_client\n",
+    "from cal_tools.tools import (\n",
+    "    calcat_creation_time,\n",
+    "    get_from_db,\n",
+    "    get_report,\n",
+    "    run_prop_seq_from_path,\n",
+    "    save_const_to_h5,\n",
+    "    send_to_db,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "mem_cells = 512\n",
+    "gain_names = ['High', 'Medium', 'Low']\n",
+    "const_shape = (mem_cells, 32, 256, 3)  # cells, slow_scan, fast_scan, gain\n",
+    "    \n",
+    "gain_runs = {}\n",
+    "if capacitor_setting == 5:\n",
+    "    gain_runs[\"high_5pf\"] = run_high\n",
+    "    gain_runs[\"med_5pf\"] =  run_med\n",
+    "    gain_runs[\"low_5pf\"] =  run_low\n",
+    "elif capacitor_setting == 50:\n",
+    "    gain_runs[\"high_50pf\"] = run_high\n",
+    "    gain_runs[\"med_50pf\"] =  run_med\n",
+    "    gain_runs[\"low_50pf\"] =  run_low\n",
+    "\n",
+    "capacitor_settings = [capacitor_setting]\n",
+    "capacitor_settings = ['{}pf'.format(c) for c in capacitor_settings]\n",
+    "\n",
+    "creation_time = calcat_creation_time(in_folder, run_high, creation_time)\n",
+    "print(f\"Using {creation_time} as creation time\")\n",
+    "\n",
+    "source_name = source_name.format(karabo_id)\n",
+    "control_source_name = control_source_name.format(karabo_id)\n",
+    "\n",
+    "if -1 in {bias_voltage_0, bias_voltage_1}:\n",
+    "    run_data = RunDirectory(os.path.join(in_folder, f\"r{run_high:04d}\"))\n",
+    "    if bias_voltage_0 == -1:\n",
+    "        bias_voltage_0 = run_data[control_source_name, 'sensorBiasVoltage0'].as_single_value(atol=5.)\n",
+    "    if bias_voltage_1 == -1:\n",
+    "        bias_voltage_1 = run_data[control_source_name, 'sensorBiasVoltage1'].as_single_value(atol=5.)\n",
+    "\n",
+    "run, prop, seq = run_prop_seq_from_path(in_folder)\n",
+    "\n",
+    "display(Markdown('## Evaluated parameters'))\n",
+    "print(f'CalDB Interface {cal_db_interface}')\n",
+    "print(f\"Proposal: {prop}\")\n",
+    "print(f\"Memory cells: {mem_cells}\")\n",
+    "print(f\"Runs: {run_high}, {run_med}, {run_low}\")\n",
+    "print(f\"Using DB: {db_output}\")\n",
+    "print(f\"Input: {in_folder}\")\n",
+    "print(f\"Output: {out_folder}\")\n",
+    "print(f\"Bias voltage: {bias_voltage_0}V & {bias_voltage_1}V\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "calcat = CalCatApi(client=calibration_client())\n",
+    "detector_id = calcat.detector(karabo_id)['id']\n",
+    "pdus_by_da = calcat.physical_detector_units(detector_id, pdu_snapshot_at=creation_time)\n",
+    "pdu_name_by_da = {da: p['physical_name'] for (da, p) in pdus_by_da.items()}\n",
+    "\n",
+    "if karabo_da and karabo_da != ['']:\n",
+    "    karabo_da = [da for da in karabo_da if da in pdu_name_by_da]\n",
+    "    pdu_name_by_da = {da: pdu_name_by_da[da] for da in karabo_da}\n",
+    "else:\n",
+    "    karabo_da = sorted(pdu_name_by_da.keys())\n",
+    "    \n",
+    "modules = [int(x.split('/')[-1]) for x in karabo_da]"
+   ]
+  },
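+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Aggregator names encode the mini module number after the slash; this is what the list comprehension above extracts. A quick check with a hypothetical name of the documented form:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "example_da = 'LPDMINI00/2'  # hypothetical aggregator name, format as documented above\n",
+    "print(int(example_da.split('/')[-1]))  # -> module number 2"
+   ]
+  },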
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"Minis in use (1-8) and PDUs:\")\n",
+    "for mod_num, karabo_da_m in zip(modules, karabo_da):\n",
+    "    print(f\"Mini {mod_num} -> {pdu_name_by_da[karabo_da_m]}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Data processing"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "parallel_num_procs = min(6, len(modules)*3)\n",
+    "parallel_num_threads = multiprocessing.cpu_count() // parallel_num_procs\n",
+    "\n",
+    "# the actual characterization\n",
+    "def characterize_detector(run_path, gg):\n",
+    "\n",
+    "    run = RunDirectory(run_path, parallelize=False)\n",
+    "    det_source = source_name.format(karabo_id)\n",
+    "    data = run[det_source, 'image.data'].drop_empty_trains()\n",
+    "    data = data[skip_first_ntrains : skip_first_ntrains + ntrains]\n",
+    "    cell_ids = run[det_source, 'image.cellId'].drop_empty_trains()\n",
+    "    cell_ids = cell_ids[skip_first_ntrains : skip_first_ntrains + ntrains]\n",
+    "    \n",
+    "    if len(data.train_ids) < min_trains:\n",
+    "        raise Exception(f\"Run {run_path} only contains {len(data.train_ids)} trains, but {min_trains} required\")\n",
+    "\n",
+    "    im = data.ndarray()\n",
+    "    if im.ndim > 3:\n",
+    "        im = im[:, 0]  # Drop extra dimension\n",
+    "    \n",
+    "    cellid = cell_ids.ndarray()\n",
+    "    cellid_pattern = cell_ids[0].ndarray()\n",
+    "    if cellid.ndim > 1:\n",
+    "        cellid = cellid[:, 0]\n",
+    "        cellid_pattern = cellid_pattern[:, 0]\n",
+    "\n",
+    "    # Mask off gain bits, leaving only data\n",
+    "    im &= 0b0000111111111111\n",
+    "\n",
+    "    im = im.astype(np.float32)\n",
+    "\n",
+    "    context = psh.context.ThreadContext(num_workers=parallel_num_threads)\n",
+    "    # Results here should have shape (512, [<= 256], 256)\n",
+    "    offset = context.alloc(shape=(mem_cells,) + im.shape[1:], dtype=np.float64)\n",
+    "    noise = context.alloc(like=offset)\n",
+    "    normal_test = context.alloc(like=offset)\n",
+    "\n",
+    "    def process_cell(worker_id, array_index, cc):\n",
+    "        idx = cellid == cc\n",
+    "        im_slice = im[idx]\n",
+    "        if np.any(idx):\n",
+    "            offset[cc] = np.median(im_slice, axis=0)\n",
+    "            noise[cc] = np.std(im_slice, axis=0)\n",
+    "            if test_for_normality:\n",
+    "                _, normal_test[cc] = scipy.stats.normaltest(im_slice, axis=0)\n",
+    "    context.map(process_cell, np.unique(cellid))\n",
+    "\n",
+    "    # bad pixels\n",
+    "    bp = np.zeros(offset.shape, np.uint32)\n",
+    "    # offset related bad pixels\n",
+    "    offset_mn = np.nanmedian(offset, axis=(1, 2)).reshape(-1, 1, 1)  # add some axes to make broadcasting work\n",
+    "    offset_std = np.nanstd(offset, axis=(1, 2)).reshape(-1, 1, 1)\n",
+    "\n",
+    "    bp[(offset < offset_mn-thresholds_offset_sigma*offset_std) |\n",
+    "       (offset > offset_mn+thresholds_offset_sigma*offset_std)] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
+    "    bp[(offset < thresholds_offset_hard[0]) | (\n",
+    "        offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
+    "    bp[~np.isfinite(offset)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
+    "\n",
+    "    # noise related bad pixels\n",
+    "    noise_mn = np.nanmedian(noise, axis=(1, 2)).reshape(-1, 1, 1)  # add some axes to make broadcasting work\n",
+    "    noise_std = np.nanstd(noise, axis=(1, 2)).reshape(-1, 1, 1)\n",
+    "\n",
+    "    bp[(noise < noise_mn-thresholds_noise_sigma*noise_std) |\n",
+    "       (noise > noise_mn+thresholds_noise_sigma*noise_std)] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
+    "    bp[(noise < thresholds_noise_hard[0]) | (\n",
+    "        noise > thresholds_noise_hard[1])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
+    "    bp[~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
+    "\n",
+    "    idx = (cellid == cellid[0])\n",
+    "    return offset, noise, gg, bp, im[idx, 12::32, 12], normal_test, cellid_pattern"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def modno_to_slice(m):\n",
+    "    return slice((m-1) * 32, m * 32)"
+   ]
+  },
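+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The helper maps a mini module number (1-8) to its 32-row slice of the stacked 256-row frame, for example:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Quick sanity check of the mapping above.\n",
+    "assert modno_to_slice(1) == slice(0, 32)\n",
+    "assert modno_to_slice(8) == slice(224, 256)"
+   ]
+  },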
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "offset_consts = {m: np.zeros(const_shape, dtype=np.float64) for m in modules}\n",
+    "noise_consts =  {m: np.zeros(const_shape, dtype=np.float64) for m in modules}\n",
+    "badpix_consts = {m: np.zeros(const_shape, dtype=np.uint32)  for m in modules}\n",
+    "normal_tests =  {m: np.zeros(const_shape, dtype=np.float64) for m in modules}\n",
+    "data_samples = {m: np.full((ntrains, 3), np.nan) for m in modules}  # pixel (12, 12) in first frame of each train\n",
+    "\n",
+    "# Should be the same cell order for all modules & all gain stages\n",
+    "cellid_pattern_prev = None\n",
+    "\n",
+    "gg = 0\n",
+    "inp = []\n",
+    "    \n",
+    "for gain_i, (gain, run_num) in enumerate(gain_runs.items()):\n",
+    "    run_path = os.path.join(in_folder, f\"r{run_num:04d}\")\n",
+    "    print(\"Process run: \", run_path)\n",
+    "    inp.append((run_path, gain_i))\n",
+    "\n",
+    "with multiprocessing.Pool(processes=parallel_num_procs) as pool:\n",
+    "    results = pool.starmap(characterize_detector, inp)\n",
+    "\n",
+    "for ir, r in enumerate(results):\n",
+    "    offset, noise, gg, bp, data, normal, cellid_pattern = r\n",
+    "    \n",
+    "    if cellid_pattern_prev is not None and not np.array_equal(cellid_pattern, cellid_pattern_prev):\n",
+    "        raise ValueError(\"Inconsistent cell ID pattern between gain stages\")\n",
+    "    cellid_pattern_prev = cellid_pattern\n",
+    "\n",
+    "    # Split results up by module\n",
+    "    for m in modules:\n",
+    "        mod_slice = modno_to_slice(m)\n",
+    "        offset_consts[m][..., gg] = offset[:, mod_slice]\n",
+    "        noise_consts[m][..., gg] = noise[:, mod_slice]\n",
+    "        badpix_consts[m][..., gg] = bp[:, mod_slice]\n",
+    "        data_samples[m][..., gg][:data.shape[0], ...] = data[:, m - 1]\n",
+    "        normal_tests[m][..., gg] = normal[:, mod_slice]\n",
+    "\n",
+    "    print(f\"{gain_names[gg]} gain. \"\n",
+    "          f\"Number of processed trains per cell: {data.shape[0]}.\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Read report path and create file location tuple to add with the injection\n",
+    "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
+    "file_loc = 'proposal:{} runs:{} {} {}'.format(proposal, run_low, run_med, run_high)\n",
+    "\n",
+    "report = get_report(metadata_folder)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Retrieve existing constants for comparison\n",
+    "clist = [\"Offset\", \"Noise\", \"BadPixelsDark\"]\n",
+    "old_const = {}\n",
+    "old_mdata = {}\n",
+    "\n",
+    "print('Retrieve pre-existing constants for comparison.')\n",
+    "cap = capacitor_settings[0]\n",
+    "\n",
+    "for mod_num, karabo_da_m in zip(modules, karabo_da):\n",
+    "    db_module = pdu_name_by_da[karabo_da_m]\n",
+    "    old_const[mod_num] = {}\n",
+    "    old_mdata[mod_num] = {}\n",
+    "\n",
+    "    mem_cell_order = make_cell_order_condition(inject_cell_order, cellid_pattern)\n",
+    "\n",
+    "    # mod_num is from 1 to 8, so b_v_0 applies to odd numbers\n",
+    "    bias_voltage = bias_voltage_0 if (mod_num % 2 == 1) else bias_voltage_1\n",
+    "    condition = Conditions.Dark.LPD(\n",
+    "        memory_cells=mem_cells,\n",
+    "        pixels_x=256,\n",
+    "        pixels_y=32,\n",
+    "        bias_voltage=bias_voltage,\n",
+    "        capacitor=cap,\n",
+    "        memory_cell_order=mem_cell_order,\n",
+    "    )\n",
+    "    for const in clist:\n",
+    "        constant = getattr(Constants.LPD, const)()\n",
+    "\n",
+    "        data, mdata = get_from_db(karabo_id, karabo_da_m,\n",
+    "                                  constant,\n",
+    "                                  condition, None,\n",
+    "                                  cal_db_interface,\n",
+    "                                  creation_time=creation_time,\n",
+    "                                  verbosity=2, timeout=cal_db_timeout)\n",
+    "\n",
+    "        old_const[mod_num][const] = data\n",
+    "\n",
+    "        if mdata is None or data is None:\n",
+    "            old_mdata[mod_num][const] = {\n",
+    "                \"timestamp\": \"Not found\",\n",
+    "                \"filepath\": None,\n",
+    "                \"h5path\": None\n",
+    "            }\n",
+    "        else:\n",
+    "            old_mdata[mod_num][const] = {\n",
+    "                \"timestamp\": mdata.calibration_constant_version.begin_at.isoformat(),\n",
+    "                \"filepath\": os.path.join(\n",
+    "                    mdata.calibration_constant_version.hdf5path,\n",
+    "                    mdata.calibration_constant_version.filename\n",
+    "                ),\n",
+    "                \"h5path\": mdata.calibration_constant_version.h5path,\n",
+    "            }\n",
+    "\n",
+    "    with open(f\"{out_folder}/module_metadata_{mod_num}.yml\",\"w\") as fd:\n",
+    "        yaml.safe_dump(\n",
+    "            {\n",
+    "                \"module\": mod_num,\n",
+    "                \"pdu\": db_module,\n",
+    "                \"old-constants\": old_mdata[mod_num]\n",
+    "            }, fd)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "const_types = {\n",
+    "    'Offset': offset_consts,\n",
+    "    'Noise': noise_consts,\n",
+    "    'BadPixelsDark': badpix_consts,\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Save constants in the calibration DB\n",
+    "md = None\n",
+    "cap = capacitor_settings[0]\n",
+    "for mod_num, karabo_da_m in zip(modules, karabo_da):\n",
+    "    db_module = pdu_name_by_da[karabo_da_m]\n",
+    "    print(f\"Storing constants for PDU {db_module}\")\n",
+    "\n",
+    "    mem_cell_order = make_cell_order_condition(inject_cell_order, cellid_pattern)\n",
+    "\n",
+    "    # Do not store empty constants\n",
+    "    # In case of 0 trains data_g is initiated with nans and never refilled.\n",
+    "    if np.count_nonzero(~np.isnan(data_samples[mod_num]))==0:\n",
+    "        print(f\"Constant ({cap}, {mod_num}) would be empty, skipping saving\")\n",
+    "        continue\n",
+    "\n",
+    "    for const_name, const_dict in const_types.items():\n",
+    "\n",
+    "        dconst = getattr(Constants.LPD, const_name)()\n",
+    "        dconst.data = const_dict[mod_num]\n",
+    "\n",
+    "        # mod_num is from 1 to 8, so b_v_0 applies to odd numbers\n",
+    "        bias_voltage = bias_voltage_0 if (mod_num % 2 == 1) else bias_voltage_1\n",
+    "        \n",
+    "        # set the operating condition\n",
+    "        condition = Conditions.Dark.LPD(\n",
+    "            memory_cells=mem_cells,\n",
+    "            pixels_x=256,\n",
+    "            pixels_y=32,\n",
+    "            bias_voltage=bias_voltage,\n",
+    "            capacitor=cap,\n",
+    "            memory_cell_order=mem_cell_order,\n",
+    "       )\n",
+    "\n",
+    "        if db_output:\n",
+    "            md = send_to_db(db_module, karabo_id, dconst, condition,\n",
+    "                            file_loc, report_path=report,\n",
+    "                            cal_db_interface=cal_db_interface,\n",
+    "                            creation_time=creation_time,\n",
+    "                            timeout=cal_db_timeout)\n",
+    "\n",
+    "        if local_output:\n",
+    "            md = save_const_to_h5(db_module, karabo_id, dconst, condition,\n",
+    "                                  dconst.data, file_loc, report, creation_time, out_folder)\n",
+    "            print(f\"Calibration constant {const_name} is stored locally.\\n\")\n",
+    "\n",
+    "    print(\"Constants parameter conditions are:\\n\")\n",
+    "    print(f\"• memory_cells: {mem_cells}\\n\"\n",
+    "          f\"• bias_voltage: {bias_voltage}\\n\"\n",
+    "          f\"• capacitor: {cap}\\n\"\n",
+    "          f\"• memory cell order: {mem_cell_order}\\n\"\n",
+    "          f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Raw pedestal distribution ##\n",
+    "\n",
+    "Distribution of a pedestal (ADUs) over trains for the pixel (12,12), memory cell 12. A median of the distribution is shown in yellow. A standard deviation is shown in red. The green line shows average over all pixels for a given memory cell and gain stage."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for i in modules:\n",
+    "    fig, grid = plt.subplots(3, 1, sharex=\"col\", sharey=\"row\", figsize=(10, 7))\n",
+    "    fig.suptitle(f\"Module {i}\")\n",
+    "    fig.subplots_adjust(wspace=0, hspace=0)\n",
+    "    \n",
+    "    if np.count_nonzero(~np.isnan(data_samples[i])) == 0:\n",
+    "        break\n",
+    "    for gain in range(3):\n",
+    "        data = data_samples[i][:, gain]\n",
+    "        offset = np.nanmedian(data)\n",
+    "        noise = np.nanstd(data)\n",
+    "        xrange = [np.nanmin(data_samples[i]), np.nanmax(data_samples[i])]\n",
+    "        if xrange[1] == xrange[0]:\n",
+    "            xrange = [0, xrange[0]+xrange[0]//2]\n",
+    "            nbins = data_samples[i].shape[0]\n",
+    "        else:\n",
+    "            nbins = int(xrange[1] - xrange[0])\n",
+    "\n",
+    "        hn, cn = np.histogram(data, bins=nbins, range=xrange)\n",
+    "\n",
+    "        grid[gain].hist(data, range=xrange, bins=nbins)\n",
+    "        grid[gain].plot([offset-noise, offset-noise], [0, np.nanmax(hn)], \n",
+    "                        linewidth=1.5, color='red',\n",
+    "                        label='1 $\\sigma$ deviation')\n",
+    "        grid[gain].plot([offset+noise, offset+noise],\n",
+    "                        [0, np.nanmax(hn)], linewidth=1.5, color='red')\n",
+    "        grid[gain].plot([offset, offset], [0, 0],\n",
+    "                        linewidth=1.5, color='y', label='median')\n",
+    "\n",
+    "        grid[gain].plot([np.nanmedian(offset_consts[i][:, :, 12, gain]), \n",
+    "                         np.nanmedian(offset_consts[i][:, :, 12, gain])],\n",
+    "                        [0, np.nanmax(hn)], linewidth=1.5, color='green', \n",
+    "                        label='average over pixels')\n",
+    "\n",
+    "        grid[gain].set_xlim(xrange)\n",
+    "        grid[gain].set_ylim(0, np.nanmax(hn)*1.1)\n",
+    "        grid[gain].set_xlabel(\"Offset value [ADU]\")\n",
+    "        grid[gain].set_ylabel(\"# of occurance\")\n",
+    "\n",
+    "        if gain == 0:\n",
+    "            leg = grid[gain].legend(\n",
+    "                loc='upper center', ncol=3, \n",
+    "                bbox_to_anchor=(0.1, 0.25, 0.7, 1.0))\n",
+    "\n",
+    "        grid[gain].text(xrange[0], np.nanmax(hn)*0.4,\n",
+    "                        \"{} gain\".format(gain_names[gain]), fontsize=20)\n",
+    "\n",
+    "        a = plt.axes([.125, .1, 0.775, .8], frame_on=False)\n",
+    "        a.patch.set_alpha(0.05)\n",
+    "        a.set_xlim(xrange)\n",
+    "        plt.plot([offset, offset], [0, 1], linewidth=1.5, color='y')\n",
+    "        plt.xticks([])\n",
+    "        plt.yticks([])\n",
+    "\n",
+    "    ypos = 0.9\n",
+    "    x1pos = (np.nanmedian(data_samples[i][:, 0]) +\n",
+    "             np.nanmedian(data_samples[i][:, 2]))/2.\n",
+    "    x2pos = (np.nanmedian(data_samples[i][:, 2]) +\n",
+    "             np.nanmedian(data_samples[i][:, 1]))/2.-10\n",
+    "\n",
+    "    plt.annotate(\"\", xy=(np.nanmedian(data_samples[i][:, 0]), ypos), xycoords='data',\n",
+    "                 xytext=(np.nanmedian(data_samples[i][:, 2]), ypos), textcoords='data',\n",
+    "                 arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n",
+    "\n",
+    "    plt.annotate('{}'.format(np.nanmedian(data_samples[i][:, 0])-np.nanmedian(data_samples[i][:, 2])),\n",
+    "                 xy=(x1pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n",
+    "\n",
+    "    plt.annotate(\"\", xy=(np.nanmedian(data_samples[i][:, 2]), ypos), xycoords='data',\n",
+    "                 xytext=(np.nanmedian(data_samples[i][:, 1]), ypos), textcoords='data',\n",
+    "                 arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n",
+    "\n",
+    "    plt.annotate('{}'.format(np.nanmedian(data_samples[i][:, 2])-np.nanmedian(data_samples[i][:, 1])),\n",
+    "                 xy=(x2pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n",
+    "\n",
+    "\n",
+    "    plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "if not test_for_normality:\n",
+    "    print('Normality test was not requested. Flag `test_for_normality` False')\n",
+    "else:\n",
+    "    for i in modules:\n",
+    "        data = np.copy(normal_tests[i])\n",
+    "        data[badpix_consts[i] > 0] = 1.01\n",
+    "            \n",
+    "        hn,cn = np.histogram(data[:,:,:,0], bins=100)\n",
+    "       \n",
+    "        d = [{'x': np.arange(100)*0.01+0.01,\n",
+    "              'y': np.histogram(data[:,:,:,0], bins=100)[0],\n",
+    "              'drawstyle': 'steps-pre',\n",
+    "              'label' : 'High gain',\n",
+    "              },\n",
+    "             {'x': np.arange(100)*0.01+0.01,\n",
+    "              'y': np.histogram(data[:,:,:,1], bins=100)[0],\n",
+    "              'drawstyle': 'steps-pre',\n",
+    "              'label' : 'Medium gain',\n",
+    "              },\n",
+    "             {'x': np.arange(100)*0.01+0.01,\n",
+    "              'y': np.histogram(data[:,:,:,2], bins=100)[0],\n",
+    "              'drawstyle': 'steps-pre',\n",
+    "              'label' : 'Low gain',\n",
+    "              },\n",
+    "            ]\n",
+    "            \n",
+    "\n",
+    "        fig = plt.figure(figsize=(15,15), tight_layout={'pad': 0.5, 'w_pad': 0.3})\n",
+    "\n",
+    "        for gain in range(3):\n",
+    "            ax = fig.add_subplot(2, 2, 1+gain)\n",
+    "            heatmapPlot(data[:,:,12,gain], add_panels=False, cmap='viridis', figsize=(10,10),\n",
+    "                y_label='Rows', x_label='Columns',\n",
+    "                lut_label='p-Value',\n",
+    "                use_axis=ax,\n",
+    "                title='p-Value for cell 12, {} gain'.format(gain_names[gain]) )\n",
+    "            \n",
+    "        ax = fig.add_subplot(2, 2, 4)\n",
+    "        _ = simplePlot(d, #aspect=1.6, \n",
+    "                              x_label = \"p-Value\".format(gain), \n",
+    "                              y_label=\"# of occurance\",\n",
+    "                              use_axis=ax,\n",
+    "                               y_log=False, legend='outside-top-ncol3-frame', legend_pad=0.05, legend_size='5%')\n",
+    "        ax.ticklabel_format(style='sci', axis='y', scilimits=(4,6))\n",
+    "        "
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Single-Cell Overviews ##\n",
+    "\n",
+    "Single cell overviews allow to identify potential effects on all memory cells, e.g. on a sensor level. Additionally, they should serve as a first sanity check on expected behaviour, e.g. if structuring on the ASIC level is visible in the offsets, but otherwise no immediate artifacts are visible."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "for gain in range(3):\n",
+    "    display(\n",
+    "        Markdown('### Cell-12 overview - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "    fig = plt.figure(figsize=(18, 22) , tight_layout={'pad': 0.1, 'w_pad': 0.1})\n",
+    "    for i in modules:\n",
+    "        for iconst, const in enumerate(['Offset', 'Noise', 'BadPixelsDark']):\n",
+    "\n",
+    "            ax = fig.add_subplot(3, 2, 1+iconst)\n",
+    "\n",
+    "            data = const_types[const][i][12, :, :, gain]\n",
+    "            vmax = 1.5 * np.nanmedian(data)\n",
+    "            title = const\n",
+    "            label = f'{const} value [ADU]'\n",
+    "            title = f'{const} value'\n",
+    "            if const == 'BadPixelsDark':\n",
+    "                vmax = 4\n",
+    "                bpix_code = data.astype(np.float32)\n",
+    "                bpix_code[bpix_code == 0] = np.nan\n",
+    "                title = 'Bad pixel code'\n",
+    "                label = title\n",
+    "\n",
+    "                cb_labels = ['1 {}'.format(BadPixels.NOISE_OUT_OF_THRESHOLD.name),\n",
+    "                             '2 {}'.format(BadPixels.OFFSET_NOISE_EVAL_ERROR.name),\n",
+    "                             '3 {}'.format(BadPixels.OFFSET_OUT_OF_THRESHOLD.name),\n",
+    "                             '4 {}'.format('MIXED')]\n",
+    "\n",
+    "                heatmapPlot(bpix_code, add_panels=False, cmap='viridis',\n",
+    "                            y_label='Rows', x_label='Columns',\n",
+    "                            lut_label='', vmax=vmax, aspect=1.,\n",
+    "                            use_axis=ax, cb_ticklabels=cb_labels, cb_ticks = np.arange(4)+1, cb_loc='bottom',\n",
+    "                            title=title)\n",
+    "                del bpix_code\n",
+    "            else:\n",
+    "\n",
+    "                heatmapPlot(data, add_panels=False, cmap='viridis',\n",
+    "                            y_label='Rows', x_label='Columns',\n",
+    "                            lut_label=label, vmax=vmax, aspect=1.,\n",
+    "                            use_axis=ax, cb_loc='bottom',\n",
+    "                            title=title)\n",
+    "\n",
+    "    for i in modules:\n",
+    "        for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "            data = const_types[const][i]\n",
+    "            dataBP = np.copy(data)\n",
+    "            dataBP[badpix_consts[i] > 0] = -1\n",
+    "\n",
+    "            x_ranges = [[0, 1500], [0, 40]]\n",
+    "            hn, cn = np.histogram(\n",
+    "                data[:, :, :, gain], bins=100, range=x_ranges[iconst])\n",
+    "            hnBP, cnBP = np.histogram(dataBP[:, :, :, gain], bins=cn)\n",
+    "\n",
+    "            d = [{'x': cn[:-1],\n",
+    "                  'y': hn,\n",
+    "                  'drawstyle': 'steps-pre',\n",
+    "                  'label': 'All data',\n",
+    "                  },\n",
+    "                 {'x': cnBP[:-1],\n",
+    "                  'y': hnBP,\n",
+    "                  'drawstyle': 'steps-pre',\n",
+    "                  'label': 'Bad pixels masked',\n",
+    "                  },\n",
+    "                 ]\n",
+    "\n",
+    "            ax = fig.add_subplot(3, 2, 5+iconst)\n",
+    "            _ = simplePlot(d, figsize=(5, 7), aspect=1,\n",
+    "                                x_label=f\"{const} value [ADU]\",\n",
+    "                                y_label=\"# of occurence\",\n",
+    "                                title='', legend_pad=0.1, legend_size='10%',\n",
+    "                                use_axis=ax,\n",
+    "                                y_log=True, legend='outside-top-2col-frame')\n",
+    "\n",
+    "    plt.show()"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "cols = {BadPixels.NOISE_OUT_OF_THRESHOLD.value: (BadPixels.NOISE_OUT_OF_THRESHOLD.name, '#FF000080'),\n",
+    "        BadPixels.OFFSET_NOISE_EVAL_ERROR.value: (BadPixels.OFFSET_NOISE_EVAL_ERROR.name, '#0000FF80'),\n",
+    "        BadPixels.OFFSET_OUT_OF_THRESHOLD.value: (BadPixels.OFFSET_OUT_OF_THRESHOLD.name, '#00FF0080'),\n",
+    "        BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value: ('MIXED', '#DD00DD80')}\n",
+    "\n",
+    "if high_res_badpix_3d:\n",
+    "    display(Markdown(\"\"\"\n",
+    "    \n",
+    "    ## Global Bad Pixel Behaviour ##\n",
+    "\n",
+    "    The following plots shows the results of a bad pixel evaluation for all evaluated memory cells.\n",
+    "    Cells are stacked in the Z-dimension, while pixels values in x/y are re-binned with a factor of 2.\n",
+    "    This excludes single bad pixels present only in disconnected pixels.\n",
+    "    Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated.\n",
+    "    Colors encode the bad pixel type, or mixed type.\n",
+    "\n",
+    "        \"\"\"))\n",
+    "    # Switch rebin to 1 for full resolution and \n",
+    "    # no interpolation for badpixel values.\n",
+    "    rebin = 2\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Bad pixel behaviour - {} gain ###'.format(gain_names[gain])))\n",
+    "        for mod, data in badpix_consts.items():\n",
+    "            plot_badpix_3d(data[...,gain], cols, title='', rebin_fac=rebin)\n",
+    "            ax = plt.gca()\n",
+    "            leg = ax.get_legend()\n",
+    "            leg.set(alpha=0.5)\n",
+    "        plt.show()"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary across tiles ##\n",
+    "\n",
+    "Plots give an overview of calibration constants averaged across tiles. A bad pixel mask is applied. Constants are compared with pre-existing constants retrieved from the calibration database. Differences $\\Delta$ between the old and new constants is shown."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "time_summary = []\n",
+    "time_summary.append(f\"The following pre-existing constants are used for comparison:\")\n",
+    "for mod_num, mod_data in old_mdata.items():\n",
+    "    time_summary.append(f\"- Module {mod_num}\")\n",
+    "    for const, const_data in mod_data.items():\n",
+    "        time_summary.append(f\"    - {const} created at {const_data['timestamp']}\")\n",
+    "display(Markdown(\"\\n\".join(time_summary)))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "for i in modules:\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Summary across tiles - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        for const in const_types:\n",
+    "            data = np.copy(const_types[const][i][:, :, :, gain])\n",
+    "\n",
+    "            label = 'Fraction of bad pixels'\n",
+    "\n",
+    "            if const != 'BadPixelsDark':\n",
+    "                data[badpix_consts[i][:, :, :, gain] > 0] = np.nan\n",
+    "                label = '{} value [ADU]'.format(const)\n",
+    "            else:\n",
+    "                data[data>0] = 1.0\n",
+    "\n",
+    "            # Split tiles, making shape (mem_cells, slow_scan, tiles(=2), fast_scan)\n",
+    "            data = data.reshape(data.shape[0], 32, 2, 128)    \n",
+    "            # Average within each tile\n",
+    "            data = np.nanmean(data, axis=(1, 3))\n",
+    "            \n",
+    "            fig = plt.figure(figsize=(15, 6))\n",
+    "            ax = fig.add_subplot(1, 2, 1)\n",
+    "\n",
+    "            _ = heatmapPlot(data[:510, :], add_panels=True,\n",
+    "                            y_label='Memory Cell ID', x_label='Tile ID',\n",
+    "                            lut_label=label, use_axis=ax,\n",
+    "                            panel_y_label=label, panel_x_label=label,\n",
+    "                            cmap='viridis',  # cb_loc='right',cb_aspect=15,\n",
+    "                            x_ticklabels=np.arange(2)+1,\n",
+    "                            x_ticks=np.arange(2)+0.5)\n",
+    "\n",
+    "            if old_const[i][const] is not None:\n",
+    "                ax = fig.add_subplot(1, 2, 2)\n",
+    "\n",
+    "                dataold = np.copy(old_const[i][const][:, :, :, gain])\n",
+    "\n",
+    "                label = '$\\Delta$ {}'.format(label)\n",
+    "\n",
+    "                if const != 'BadPixelsDark':\n",
+    "                    if old_const[i]['BadPixelsDark'] is not None:\n",
+    "                        dataold[old_const[i]['BadPixelsDark'][:, :, :, gain] > 0] = np.nan\n",
+    "                    else:\n",
+    "                        dataold[:] = np.nan\n",
+    "                else:\n",
+    "                    dataold[dataold>0]=1.0\n",
+    "\n",
+    "                dataold = dataold.reshape(dataold.shape[0], 32, 2, 128)    \n",
+    "                dataold = np.nanmean(dataold, axis=(1, 3))\n",
+    "                dataold = dataold - data\n",
+    "\n",
+    "                _ = heatmapPlot(dataold[:510, :], add_panels=True,\n",
+    "                                y_label='Memory Cell ID', x_label='Tile ID',\n",
+    "                                lut_label=label, use_axis=ax,\n",
+    "                                panel_y_label=label, panel_x_label=label,\n",
+    "                                cmap='viridis',  # cb_loc='right',cb_aspect=15,\n",
+    "                                x_ticklabels=np.arange(2)+1,\n",
+    "                                x_ticks=np.arange(2)+0.5)\n",
+    "        plt.show()"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Variation of offset and noise across Tiles and ASICs ##\n",
+    "\n",
+    "The following plots show a standard deviation $\\sigma$ of the calibration constant. The plot of standard deviations across tiles show pixels of one tile ($128 \\times 32$). Value for each pixel shows a standard deviation across 16 tiles. The standard deviation across ASICs are shown overall tiles. The plot shows pixels of one ASIC ($16 \\times 32$), where the value shows a standard deviation across all ACIS of the module."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "for i in modules:\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Variation of offset and noise across ASICs - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        fig = plt.figure(figsize=(15, 6))\n",
+    "        for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "            data = np.copy(const_types[const][i][:, :, :, gain])\n",
+    "            data[badpix_consts[i][:, :, :, gain] > 0] = np.nan\n",
+    "            label = '$\\sigma$ {} [ADU]'.format(const)\n",
+    "\n",
+    "            dataA = np.nanmean(data, axis=0)  # average over cells\n",
+    "            dataA = dataA.reshape(32, 16, 16)\n",
+    "            dataA = np.nanstd(dataA, axis=1)  # average across ASICs\n",
+    "\n",
+    "            ax = fig.add_subplot(1, 2, 1+iconst)\n",
+    "            _ = heatmapPlot(dataA, add_panels=True,\n",
+    "                            y_label='rows', x_label='columns',\n",
+    "                            lut_label=label, use_axis=ax,\n",
+    "                            panel_y_label=label, panel_x_label=label,\n",
+    "                            cmap='viridis'\n",
+    "                            )\n",
+    "\n",
+    "        plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "for i in modules:\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Variation of offset and noise across tiles - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        fig = plt.figure(figsize=(15, 6))\n",
+    "        for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "            data = np.copy(const_types[const][i][:, :, :, gain])\n",
+    "            data[badpix_consts[i][:, :, :, gain] > 0] = np.nan\n",
+    "            label = '$\\sigma$ {} [ADU]'.format(const)\n",
+    "\n",
+    "            dataT = data.reshape(data.shape[0], 32, 2, 128)\n",
+    "            dataT = np.nanstd(dataT, axis=2)\n",
+    "            dataT = np.nanmean(dataT, axis=0)\n",
+    "\n",
+    "            ax = fig.add_subplot(121+iconst)\n",
+    "            _ = heatmapPlot(dataT, add_panels=True,\n",
+    "                            y_label='rows', x_label='columns',\n",
+    "                            lut_label=label, use_axis=ax,\n",
+    "                            panel_y_label=label, panel_x_label=label,\n",
+    "                            cmap='viridis')\n",
+    "        plt.show()"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Aggregate values and per cell behaviour ##\n",
+    "\n",
+    "The following tables and plots give an overview of statistical aggregates for each constant, as well as per-cell behavior, averaged across pixels."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "for i in modules:\n",
+    "    for gain in range(3):\n",
+    "        display(Markdown('### Mean over pixels - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        fig = plt.figure(figsize=(9,11))\n",
+    "\n",
+    "        for iconst, const in enumerate(const_types):\n",
+    "\n",
+    "            ax = fig.add_subplot(3, 1, 1+iconst)\n",
+    "\n",
+    "            data = const_types[const][i][:510,:,:,gain]\n",
+    "            if const == 'BadPixelsDark':\n",
+    "                data[data>0] = 1.0\n",
+    "\n",
+    "            dataBP = np.copy(data)\n",
+    "            dataBP[badpix_consts[i][:510,:,:,gain] > 0] = -10\n",
+    "\n",
+    "            data = np.nanmean(data, axis=(1, 2))\n",
+    "            dataBP = np.nanmean(dataBP, axis=(1, 2))\n",
+    "\n",
+    "            d = [{'y': data,\n",
+    "                  'x': np.arange(data.shape[0]),\n",
+    "                  'drawstyle': 'steps-mid',\n",
+    "                  'label' : 'All data'\n",
+    "                 }\n",
+    "                ]\n",
+    "\n",
+    "            if const != 'BadPixelsDark':\n",
+    "                d.append({'y': dataBP,\n",
+    "                  'x': np.arange(data.shape[0]),\n",
+    "                  'drawstyle': 'steps-mid',\n",
+    "                  'label' : 'good pixels only'\n",
+    "                 })\n",
+    "                y_title = f\"{const} value [ADU]\"\n",
+    "                title = f\"{const} value, {gain_names[gain]} gain\"\n",
+    "            else:\n",
+    "                y_title = \"Fraction of Bad Pixels\"\n",
+    "                title = f\"Fraction of Bad Pixels, {gain_names[gain]} gain\"\n",
+    "\n",
+    "            data_min = np.min([data, dataBP]) if const != 'BadPixelsDark' else np.min([data])\n",
+    "            data_max = np.max([data[20:], dataBP[20:]])\n",
+    "            data_dif = data_max - data_min\n",
+    "\n",
+    "            local_max = np.max([data[200:300], dataBP[200:300]])\n",
+    "            frac = 0.35\n",
+    "            new_max = (local_max - data_min*(1-frac))/frac\n",
+    "            new_max = np.max([data_max, new_max])\n",
+    "\n",
+    "            _ = simplePlot(d, figsize=(10,10), aspect=2, xrange=(-12, 510),\n",
+    "                              x_label = 'Memory Cell ID', \n",
+    "                              y_label=y_title, use_axis=ax,\n",
+    "                              title=title,\n",
+    "                              title_position=[0.5, 1.15],  \n",
+    "                              inset='xy-coord-right', inset_x_range=(0,20), inset_indicated=True,\n",
+    "                              inset_labeled=True, inset_coord=[0.2,0.5,0.6,0.95],\n",
+    "                                inset_lw = 1.0, y_range = [data_min-data_dif*0.05, new_max+data_dif*0.05],\n",
+    "                              y_log=False, legend='outside-top-ncol2-frame', legend_size='18%',\n",
+    "                                 legend_pad=0.00)\n",
+    "\n",
+    "            plt.tight_layout(pad=1.08, h_pad=0.35)\n",
+    "\n",
+    "        plt.show()"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary tables ##\n",
+    "\n",
+    "The following tables show summary information for the evaluated module. Values for currently evaluated constants are compared with values for pre-existing constants retrieved from the calibration database."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "table = []\n",
+    "bits = [BadPixels.NOISE_OUT_OF_THRESHOLD, BadPixels.OFFSET_OUT_OF_THRESHOLD, BadPixels.OFFSET_NOISE_EVAL_ERROR]\n",
+    "\n",
+    "for mod_num in modules:\n",
+    "    for gain in range(3):\n",
+    "\n",
+    "        l_data = []\n",
+    "        l_data_old = []\n",
+    "\n",
+    "        data = np.copy(badpix_consts[mod_num][:,:,:,gain])\n",
+    "        l_data.append(len(data[data>0].flatten()))\n",
+    "        for bit in bits:\n",
+    "            l_data.append(np.count_nonzero(badpix_consts[mod_num][:,:,:,gain] & bit.value))\n",
+    "\n",
+    "        if old_const[mod_num]['BadPixelsDark'] is not None:\n",
+    "            old_const[mod_num]['BadPixelsDark'] = old_const[mod_num]['BadPixelsDark'].astype(np.uint32)\n",
+    "            dataold = np.copy(old_const[mod_num]['BadPixelsDark'][:, :, :, gain])\n",
+    "            l_data_old.append(len(dataold[dataold>0].flatten()))\n",
+    "            for bit in bits:\n",
+    "                l_data_old.append(np.count_nonzero(old_const[mod_num]['BadPixelsDark'][:, :, :, gain] & bit.value))\n",
+    "\n",
+    "        l_data_name = ['All bad pixels', 'NOISE_OUT_OF_THRESHOLD', \n",
+    "                       'OFFSET_OUT_OF_THRESHOLD', 'OFFSET_NOISE_EVAL_ERROR']\n",
+    "\n",
+    "        l_threshold = ['', f'{thresholds_noise_sigma}', f'{thresholds_offset_sigma}',\n",
+    "                       f'{thresholds_offset_hard}/{thresholds_noise_hard}']\n",
+    "\n",
+    "        for i in range(len(l_data)):\n",
+    "            line = [f'{l_data_name[i]}, gain {gain_names[gain]}', l_threshold[i], l_data[i]]\n",
+    "\n",
+    "            if old_const[mod_num]['BadPixelsDark'] is not None:\n",
+    "                line += [l_data_old[i]]\n",
+    "            else:\n",
+    "                line += ['-']\n",
+    "\n",
+    "            table.append(line)\n",
+    "        table.append(['', '', '', ''])\n",
+    "\n",
+    "display(Markdown('''\n",
+    "\n",
+    "### Number of bad pixels ###\n",
+    "\n",
+    "One pixel can be bad for different reasons, therefore, the sum of all types of bad pixels can be more than the number of all bad pixels.\n",
+    "\n",
+    "'''))\n",
+    "if len(table)>0:\n",
+    "    md = display(Latex(tabulate.tabulate(table, tablefmt='latex', \n",
+    "                                     headers=[\"Pixel type\", \"Threshold\",\n",
+    "                                              \"New constant\", \"Old constant\"])))  "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "header = ['Parameter', \n",
+    "          \"New constant\", \"Old constant \", \n",
+    "          \"New constant\", \"Old constant \", \n",
+    "          \"New constant\", \"Old constant \"]\n",
+    "\n",
+    "for const in ['Offset', 'Noise']:\n",
+    "    table = [['','High gain', 'High gain', 'Medium gain', 'Medium gain', 'Low gain', 'Low gain']]\n",
+    "    for mod_num in modules:\n",
+    "\n",
+    "        data = np.copy(const_types[const][mod_num])\n",
+    "        data[badpix_consts[mod_num] > 0] = np.nan\n",
+    "\n",
+    "        if old_const[mod_num][const] is not None and old_const[mod_num]['BadPixelsDark'] is not None :\n",
+    "            dataold = np.copy(old_const[mod_num][const])\n",
+    "            dataold[old_const[mod_num]['BadPixelsDark']>0] = np.nan\n",
+    "\n",
+    "        f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]\n",
+    "        n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']\n",
+    "\n",
+    "        for i, f in enumerate(f_list):\n",
+    "            line = [n_list[i]]\n",
+    "            for gain in range(3):\n",
+    "                line.append('{:6.1f}'.format(f(data[...,gain])))\n",
+    "                if old_const[mod_num][const] is not None and old_const[mod_num]['BadPixelsDark'] is not None:\n",
+    "                    line.append('{:6.1f}'.format(f(dataold[...,gain])))\n",
+    "                else:\n",
+    "                    line.append('-')\n",
+    "\n",
+    "            table.append(line)\n",
+    "\n",
+    "    display(Markdown('### {} [ADU], good pixels only ###'.format(const)))\n",
+    "    md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))  "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Offline Cal",
+   "language": "python",
+   "name": "offline-cal"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebooks/LPDMini/LPD_Mini_Correct.ipynb b/notebooks/LPDMini/LPD_Mini_Correct.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..769e5121b04947b0ecf6e376cb04c7752d720137
--- /dev/null
+++ b/notebooks/LPDMini/LPD_Mini_Correct.ipynb
@@ -0,0 +1,678 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# LPD Mini Offline Correction #\n",
+    "\n",
+    "Author: European XFEL Data Analysis Group"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-12-03T15:19:56.056417Z",
+     "start_time": "2018-12-03T15:19:56.003012Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# Input parameters\n",
+    "in_folder = \"/gpfs/exfel/exp/FXE/202321/p004576/raw/\"  # the folder to read data from, required\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/kluyvert/correct-lpdmini-p4576-r48\"  # the folder to output to, required\n",
+    "metadata_folder = ''  # Directory containing calibration_metadata.yml when run by xfel-calibrate.\n",
+    "sequences = [-1]  # Sequences to correct, use [-1] for all\n",
+    "karabo_da = ['']  # Data aggregators names to correct, e.g. 'LPDMINI00/8', use [''] for all\n",
+    "run = 48  # run to process, required\n",
+    "\n",
+    "# Source parameters\n",
+    "karabo_id = 'FXE_DET_LPD_MINI'  # Karabo domain for detector.\n",
+    "input_source = '{karabo_id}/DET/0CH0:xtdf'  # Input fast data source.\n",
+    "output_source = '{karabo_id}/CORR/0CH0:output'  # Output fast data source, empty to use same as input.\n",
+    "control_source = '{karabo_id}/FPGA/FEM'  # Control source\n",
+    "\n",
+    "# CalCat parameters\n",
+    "creation_time = \"\"  # The timestamp to use with Calibration DB. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
+    "cal_db_interface = ''  # Not needed, compatibility with current webservice.\n",
+    "cal_db_timeout = 0  # Not needed, compatbility with current webservice.\n",
+    "cal_db_root = '/gpfs/exfel/d/cal/caldb_store'\n",
+    "\n",
+    "# Operating conditions\n",
+    "bias_voltage_0 = -1 # bias voltage for minis 1, 3, 5, 7; Setting -1 will read the value from files\n",
+    "bias_voltage_1 = -1 # bias voltage for minis 2, 4, 6, 8; Setting -1 will read the value from files\n",
+    "capacitor = '5pF'  # Capacitor setting: 5pF or 50pF\n",
+    "photon_energy = 9.3  # Photon energy in keV.\n",
+    "use_cell_order = 'auto'  # Whether to use memory cell order as a detector condition (auto = used only when memory cells wrap around)\n",
+    "\n",
+    "# Correction parameters\n",
+    "offset_corr = True  # Offset correction.\n",
+    "rel_gain = True  # Gain correction based on RelativeGain constant.\n",
+    "ff_map = True  # Gain correction based on FFMap constant.\n",
+    "gain_amp_map = True  # Gain correction based on GainAmpMap constant.\n",
+    "\n",
+    "# Output options\n",
+    "overwrite = True  # set to True if existing data should be overwritten\n",
+    "chunks_data = 1  # HDF chunk size for pixel data in number of frames.\n",
+    "chunks_ids = 32  # HDF chunk size for cellId and pulseId datasets.\n",
+    "\n",
+    "# Parallelization options\n",
+    "sequences_per_node = 1  # Sequence files to process per node\n",
+    "max_nodes = 8  # Maximum number of SLURM jobs to split correction work into\n",
+    "num_workers = 8  # Worker processes per node, 8 is safe on 768G nodes but won't work on 512G.\n",
+    "num_threads_per_worker = 32  # Number of threads per worker.\n",
+    "\n",
+    "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da, max_nodes):\n",
+    "    from xfel_calibrate.calibrate import balance_sequences as bs\n",
+    "    return bs(in_folder, run, sequences, sequences_per_node, karabo_da, max_nodes=max_nodes)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-12-03T15:19:56.990566Z",
+     "start_time": "2018-12-03T15:19:56.058378Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from pathlib import Path\n",
+    "from time import perf_counter\n",
+    "import re\n",
+    "import warnings\n",
+    "\n",
+    "from IPython.display import Markdown\n",
+    "import numpy as np\n",
+    "import h5py\n",
+    "\n",
+    "import matplotlib\n",
+    "matplotlib.use('agg')\n",
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline\n",
+    "\n",
+    "from calibration_client.modules import CalibrationConstantVersion\n",
+    "import extra_data as xd\n",
+    "import extra_geom as xg\n",
+    "import pasha as psh\n",
+    "\n",
+    "from cal_tools.calcat_interface import CalCatApi\n",
+    "from cal_tools.lpdalgs import correct_lpd_frames\n",
+    "from cal_tools.lpdlib import get_mem_cell_pattern, make_cell_order_condition\n",
+    "from cal_tools.tools import CalibrationMetadata, calcat_creation_time\n",
+    "from cal_tools.files import DataFile\n",
+    "from cal_tools.restful_config import calibration_client"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Prepare environment"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "file_re = re.compile(r'^RAW-R(\\d{4})-(\\w+\\d+)-S(\\d{5})$')  # This should probably move to cal_tools\n",
+    "\n",
+    "run_folder = Path(in_folder) / f'r{run:04d}'\n",
+    "out_folder = Path(out_folder)\n",
+    "out_folder.mkdir(exist_ok=True)\n",
+    "\n",
+    "output_source = output_source or input_source\n",
+    "\n",
+    "input_source = input_source.format(karabo_id=karabo_id)\n",
+    "output_source = output_source.format(karabo_id=karabo_id)\n",
+    "control_source = control_source.format(karabo_id=karabo_id)\n",
+    "\n",
+    "cal_db_root = Path(cal_db_root)\n",
+    "\n",
+    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
+    "print(f'Using {creation_time.isoformat()} as creation time')\n",
+    "    \n",
+    "# Pick all sequences or those selected.\n",
+    "if not sequences or sequences == [-1]:\n",
+    "    do_sequence = lambda seq: True\n",
+    "else:\n",
+    "    do_sequence = [int(x) for x in sequences].__contains__\n",
+    "    \n",
+    "# List of detector sources.\n",
+    "det_inp_sources = [input_source.format(karabo_id=karabo_id)]\n",
+    "\n",
+    "mem_cells = 512"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if -1 in {bias_voltage_0, bias_voltage_1}:\n",
+    "    run_data = xd.RunDirectory(Path(in_folder, f\"r{run:04d}\"))\n",
+    "    if bias_voltage_0 == -1:\n",
+    "        bias_voltage_0 = run_data[control_source, 'sensorBiasVoltage0'].as_single_value(atol=5.)\n",
+    "    if bias_voltage_1 == -1:\n",
+    "        bias_voltage_1 = run_data[control_source, 'sensorBiasVoltage1'].as_single_value(atol=5.)\n",
+    "\n",
+    "print(f\"Using bias voltages {bias_voltage_0}V & {bias_voltage_1}V\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Select data to process"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "calcat_client = calibration_client()\n",
+    "calcat = CalCatApi(client=calcat_client)\n",
+    "\n",
+    "# Look up PDUs\n",
+    "detector_id = calcat.detector(karabo_id)['id']\n",
+    "pdus_by_da = calcat.physical_detector_units(detector_id, pdu_snapshot_at=creation_time)\n",
+    "modnos_from_db = set()\n",
+    "\n",
+    "if not karabo_da or karabo_da == ['']:\n",
+    "    karabo_da = sorted(pdus_by_da.keys())\n",
+    "else:\n",
+    "    karabo_da = sorted(set(karabo_da) & pdus_by_da.keys())\n",
+    "\n",
+    "print(\"Modules to correct:\", karabo_da)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data_to_process = []\n",
+    "data_agg_names = {kda.split('/')[0] for kda in karabo_da}\n",
+    "\n",
+    "for inp_path in run_folder.glob('RAW-*.h5'):\n",
+    "    match = file_re.match(inp_path.stem)\n",
+    "    \n",
+    "    if match[2] not in data_agg_names or not do_sequence(int(match[3])):\n",
+    "        continue\n",
+    "        \n",
+    "    outp_path = out_folder / 'CORR-R{run:04d}-{aggregator}-S{seq:05d}.h5'.format(\n",
+    "        run=int(match[1]), aggregator=match[2], seq=int(match[3]))\n",
+    "\n",
+    "    data_to_process.append((inp_path, outp_path))\n",
+    "\n",
+    "print('Files to process:')\n",
+    "for inp_path, _ in sorted(data_to_process):\n",
+    "    print(inp_path.name)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Obtain and prepare calibration constants"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "const_data = {}\n",
+    "retrieved_consts = {}  # To be recorded in YAML file\n",
+    "const_load_mp = psh.ProcessContext(num_workers=24)\n",
+    "\n",
+    "module_const_shape = (mem_cells, 32, 256, 3)  # cells, slow_scan, fast_scan, gain\n",
+    "\n",
+    "# Retrieve constants from CALCAT.\n",
+    "dark_calibrations = {\n",
+    "    14: 'BadPixelsDark'  # np.uint32\n",
+    "}\n",
+    "if offset_corr:\n",
+    "    dark_calibrations[1] = 'Offset'  # np.float32\n",
+    "\n",
+    "base_condition = [\n",
+    "    # Bias voltage added below as it differs by module\n",
+    "    dict(parameter_name='Feedback capacitor', value=capacitor),\n",
+    "    dict(parameter_name='Memory cells', value=mem_cells),\n",
+    "    dict(parameter_name='Pixels X', value=256),\n",
+    "    dict(parameter_name='Pixels Y', value=32),\n",
+    "]\n",
+    "dark_condition = base_condition.copy()\n",
+    "if use_cell_order != 'never':\n",
+    "    # Read the order of memory cells used\n",
+    "    raw_data = xd.DataCollection.from_paths([e[0] for e in data_to_process])\n",
+    "    cell_ids_pattern_s = make_cell_order_condition(\n",
+    "        use_cell_order, get_mem_cell_pattern(raw_data, det_inp_sources)\n",
+    "    )\n",
+    "    if cell_ids_pattern_s is not None:        \n",
+    "        print(\"Memory cells order:\", cell_ids_pattern_s)\n",
+    "        dark_condition.append(\n",
+    "            dict(parameter_name='Memory cell order', value=cell_ids_pattern_s),\n",
+    "        )\n",
+    "\n",
+    "illuminated_calibrations = {}\n",
+    "if rel_gain:\n",
+    "    illuminated_calibrations[44] = 'RelativeGain'  # np.float32\n",
+    "if ff_map:\n",
+    "    illuminated_calibrations[43] = 'FFMap'  # np.float32\n",
+    "    illuminated_calibrations[20] = 'BadPixelsFF'  # np.uint32\n",
+    "if gain_amp_map:\n",
+    "    illuminated_calibrations[42] = 'GainAmpMap'  # np.float32\n",
+    "\n",
+    "illuminated_condition = base_condition + [\n",
+    "    dict(parameter_name='Source Energy', value=photon_energy),\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print('Querying calibration database', end='', flush=True)\n",
+    "start = perf_counter()\n",
+    "\n",
+    "for calibrations, condition in [\n",
+    "    (dark_calibrations, dark_condition),\n",
+    "    (illuminated_calibrations, illuminated_condition)\n",
+    "]:\n",
+    "    if not calibrations:\n",
+    "        continue\n",
+    "\n",
+    "    for karabo_da_m in karabo_da:\n",
+    "        mod_num = int(karabo_da_m.split('/')[-1])\n",
+    "        # mod_num is from 1 to 8, so b_v_0 applies to odd numbers\n",
+    "        bias_voltage = bias_voltage_0 if mod_num % 2 == 1 else bias_voltage_1\n",
+    "        condition_w_voltage = [dict(parameter_name='Sensor Bias Voltage', value=bias_voltage)] + condition\n",
+    "        \n",
+    "        resp = CalibrationConstantVersion.get_closest_by_time_by_detector_conditions(\n",
+    "            calcat_client, karabo_id, list(calibrations.keys()),\n",
+    "            {'parameters_conditions_attributes': condition_w_voltage},\n",
+    "            karabo_da=karabo_da_m, event_at=creation_time.isoformat()\n",
+    "        )\n",
+    "\n",
+    "        if not resp['success']:\n",
+    "            raise RuntimeError(resp)\n",
+    "\n",
+    "        for ccv in resp['data']:\n",
+    "            cc = ccv['calibration_constant']\n",
+    "\n",
+    "            calibration_name = calibrations[cc['calibration_id']]\n",
+    "\n",
+    "            mod_const_metadata = retrieved_consts.setdefault(karabo_da_m, {\n",
+    "                'physical-name': ccv['physical_detector_unit']['physical_name']\n",
+    "            })\n",
+    "            mod_const_metadata.setdefault('constants', {})[calibration_name] = {\n",
+    "                \"path\": str(cal_db_root / ccv['path_to_file'] / ccv['file_name']),\n",
+    "                \"dataset\": ccv['data_set_name'],\n",
+    "                \"creation-time\": ccv[\"begin_validity_at\"],\n",
+    "                \"ccv-id\": ccv[\"id\"],\n",
+    "            }\n",
+    "\n",
+    "            dtype = np.uint32 if calibration_name.startswith('BadPixels') else np.float32\n",
+    "\n",
+    "            const_data[(mod_num, calibration_name)] = dict(\n",
+    "                path=Path(ccv['path_to_file']) / ccv['file_name'],\n",
+    "                dataset=ccv['data_set_name'],\n",
+    "                data=const_load_mp.alloc(shape=module_const_shape, dtype=dtype)\n",
+    "            )\n",
+    "        print('.', end='', flush=True)\n",
+    "            \n",
+    "total_time = perf_counter() - start\n",
+    "print(f'{total_time:.1f}s')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "lines = []\n",
+    "for karabo_da_m, mod_const_metadata in retrieved_consts.items():\n",
+    "    lines.append(f\"- {karabo_da_m} ({mod_const_metadata['physical-name']})\")\n",
+    "    for const_name, d in mod_const_metadata['constants'].items():\n",
+    "        url = f\"https://in.xfel.eu/calibration/calibration_constant_versions/{d['ccv-id']}\"\n",
+    "        lines.append(f\"  - {const_name}: [{d['creation-time']}]({url})\")\n",
+    "\n",
+    "Markdown('\\n'.join(lines))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "CalibrationMetadata(metadata_folder or out_folder).add_fragment({\n",
+    "    \"retrieved-constants\": retrieved_consts\n",
+    "})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def load_constant_dataset(wid, index, const_descr):\n",
+    "    ccv_entry = const_data[const_descr]\n",
+    "    \n",
+    "    with h5py.File(cal_db_root / ccv_entry['path'], 'r') as fp:\n",
+    "        fp[ccv_entry['dataset'] + '/data'].read_direct(ccv_entry['data'])\n",
+    "        \n",
+    "    print('.', end='', flush=True)\n",
+    "\n",
+    "print('Loading calibration data', end='', flush=True)\n",
+    "start = perf_counter()\n",
+    "const_load_mp.map(load_constant_dataset, list(const_data.keys()))\n",
+    "total_time = perf_counter() - start\n",
+    "\n",
+    "print(f'{total_time:.1f}s')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "module_nums = sorted({n for n, _ in const_data})\n",
+    "nmods = len(module_nums)\n",
+    "const_type_names = {t for _, t in const_data}\n",
+    "\n",
+    "const_shape = (mem_cells, 32 * len(module_nums), 256, 3)  # cells, slow_scan, fast_scan, gain\n",
+    "const_slices = [slice(i * 32, (i+1) * 32) for i in range(len(module_nums))]\n",
+    "raw_data_slices = [slice((n-1) * 32, n * 32) for n in module_nums]\n",
+    "\n",
+    "def _assemble_constant(arr, calibration_name):\n",
+    "    for mod_num, sl in zip(module_nums, const_slices):\n",
+    "        arr[:, sl] = const_data[mod_num, calibration_name]['data']\n",
+    "\n",
+    "offset_const = np.zeros(const_shape, dtype=np.float32)\n",
+    "if offset_corr:\n",
+    "    _assemble_constant(offset_const, 'Offset')\n",
+    "\n",
+    "mask_const = np.zeros(const_shape, dtype=np.uint32)\n",
+    "_assemble_constant(mask_const, 'BadPixelsDark')\n",
+    "\n",
+    "gain_const = np.ones(const_shape, dtype=np.float32)\n",
+    "if rel_gain:\n",
+    "    _assemble_constant(gain_const, 'RelativeGain')\n",
+    "\n",
+    "if ff_map:\n",
+    "    ff_map_gain = np.ones(const_shape, dtype=np.float32)\n",
+    "    _assemble_constant(ff_map_gain, 'FFMap')\n",
+    "    gain_const *= ff_map_gain\n",
+    "\n",
+    "    if 'BadPixelsFF' in const_type_names:\n",
+    "        badpix_ff = np.zeros(const_shape, dtype=np.uint32)\n",
+    "        _assemble_constant(badpix_ff, 'BadPixelsFF')\n",
+    "        mask_const |= badpix_ff\n",
+    "\n",
+    "if gain_amp_map:\n",
+    "    gain_amp_map = np.zeros(const_shape, dtype=np.float32)\n",
+    "    _assemble_constant(gain_amp_map, 'GainAmpMap')\n",
+    "    gain_const *= gain_amp_map"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def correct_file(wid, index, work):\n",
+    "    inp_path, outp_path = work\n",
+    "    \n",
+    "    start = perf_counter()\n",
+    "    dc = xd.H5File(inp_path, inc_suspect_trains=False).select('*', 'image.*', require_all=True)\n",
+    "    inp_source = dc[input_source]\n",
+    "    open_time = perf_counter() - start\n",
+    "    \n",
+    "    # Load raw data for this file.\n",
+    "    # Reshaping gets rid of the extra 1-len dimensions without\n",
+    "    # mangling the frame axis for an actual frame count of 1.\n",
+    "    start = perf_counter()\n",
+    "    in_raw = inp_source['image.data'].ndarray()\n",
+    "    if in_raw.ndim > 3:\n",
+    "        in_raw = in_raw[:, 0]\n",
+    "    in_cell = inp_source['image.cellId'].ndarray().reshape(-1)\n",
+    "    in_pulse = inp_source['image.pulseId'].ndarray().reshape(-1)\n",
+    "    read_time = perf_counter() - start\n",
+    "    \n",
+    "    # Slice modules from input data\n",
+    "    data_shape = (in_raw.shape[0], nmods * 32, 256)\n",
+    "    in_sliced = np.zeros(data_shape, dtype=in_raw.dtype)\n",
+    "    for i, sl in enumerate(raw_data_slices):\n",
+    "        in_sliced[:, i*32:(i+1)*32] = in_raw[..., sl, :]\n",
+    "        \n",
+    "    output_shape = (data_shape[0], nmods, 32, 256)\n",
+    "    \n",
+    "    # Allocate output arrays.\n",
+    "    out_data = np.zeros(in_sliced.shape, dtype=np.float32)\n",
+    "    out_gain = np.zeros(in_sliced.shape, dtype=np.uint8)\n",
+    "    out_mask = np.zeros(in_sliced.shape, dtype=np.uint32)\n",
+    "            \n",
+    "    start = perf_counter()\n",
+    "    correct_lpd_frames(in_sliced, in_cell,\n",
+    "                       out_data, out_gain, out_mask,\n",
+    "                       offset_const, gain_const, mask_const,\n",
+    "                       num_threads=num_threads_per_worker)\n",
+    "    correct_time = perf_counter() - start\n",
+    "    \n",
+    "    image_counts = inp_source['image.data'].data_counts(labelled=False)\n",
+    "    \n",
+    "    start = perf_counter()\n",
+    "    if (not outp_path.exists() or overwrite) and image_counts.sum() > 0:\n",
+    "        with DataFile(outp_path, 'w') as outp_file:            \n",
+    "            outp_file.create_index(dc.train_ids, from_file=dc.files[0])\n",
+    "            outp_file.create_metadata(like=dc, instrument_channels=(f'{output_source}/image',))\n",
+    "            \n",
+    "            outp_source = outp_file.create_instrument_source(output_source)\n",
+    "            \n",
+    "            outp_source.create_index(image=image_counts)\n",
+    "            outp_source.create_key('image.cellId', data=in_cell,\n",
+    "                                   chunks=(min(chunks_ids, in_cell.shape[0]),))\n",
+    "            outp_source.create_key('image.pulseId', data=in_pulse,\n",
+    "                                   chunks=(min(chunks_ids, in_pulse.shape[0]),))\n",
+    "            outp_source.create_key('image.data', data=out_data.reshape(output_shape),\n",
+    "                                   chunks=(min(chunks_data, out_data.shape[0]), 1, 32, 256))\n",
+    "            outp_source.create_compressed_key('image.gain', data=out_gain.reshape(output_shape))\n",
+    "            outp_source.create_compressed_key('image.mask', data=out_mask.reshape(output_shape))\n",
+    "\n",
+    "    write_time = perf_counter() - start\n",
+    "    \n",
+    "    total_time = open_time + read_time + correct_time + write_time\n",
+    "    frame_rate = in_raw.shape[0] / total_time\n",
+    "    \n",
+    "    m = file_re.match(inp_path.stem)\n",
+    "    seq = int(m[3]) if m else -1\n",
+    "    \n",
+    "    print('{}\\t{}\\t{:.3f}\\t{:.3f}\\t{:.3f}\\t{:.3f}\\t{:.3f}\\t{}\\t{:.1f}'.format(\n",
+    "        wid, seq, open_time, read_time, correct_time, write_time, total_time,\n",
+    "        in_raw.shape[0], frame_rate))\n",
+    "    \n",
+    "    in_raw = None\n",
+    "    in_cell = None\n",
+    "    in_pulse = None\n",
+    "    out_data = None\n",
+    "    out_gain = None\n",
+    "    out_mask = None\n",
+    "\n",
+    "print('worker\\tseq\\topen\\tread\\tcorrect\\twrite\\ttotal\\tframes\\trate')\n",
+    "start = perf_counter()\n",
+    "psh.ProcessContext(num_workers=num_workers).map(correct_file, data_to_process)\n",
+    "total_time = perf_counter() - start\n",
+    "print(f'Total time: {total_time:.1f}s')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Data preview for first train"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This geometry is arbitrary, we just want to show all the modules\n",
+    "geom = xg.LPD_MiniGeometry.from_module_positions(\n",
+    "    [(0, i * 40) for i in range(nmods)]\n",
+    ")\n",
+    "\n",
+    "output_paths = [outp_path for _, outp_path in data_to_process if outp_path.exists()]\n",
+    "dc = xd.H5File(sorted(output_paths)[0]).select_trains(np.s_[0])\n",
+    "\n",
+    "det = dc[output_source.format(karabo_id=karabo_id)]\n",
+    "data = det['image.data'].ndarray()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Intensity histogram across all cells"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "left_edge_ratio = 0.01\n",
+    "right_edge_ratio = 0.99\n",
+    "\n",
+    "fig, ax = plt.subplots(num=1, clear=True, figsize=(15, 6))\n",
+    "values, bins, _ = ax.hist(np.ravel(data), bins=2000, range=(-1500, 2000))\n",
+    "\n",
+    "def find_nearest_index(array, value):\n",
+    "    return (np.abs(array - value)).argmin()\n",
+    "\n",
+    "cum_values = np.cumsum(values)\n",
+    "vmin = bins[find_nearest_index(cum_values, cum_values[-1]*left_edge_ratio)]\n",
+    "vmax = bins[find_nearest_index(cum_values, cum_values[-1]*right_edge_ratio)]\n",
+    "\n",
+    "max_value = values.max()\n",
+    "ax.vlines([vmin, vmax], 0, max_value, color='red', linewidth=5, alpha=0.2)\n",
+    "ax.text(vmin, max_value, f'{left_edge_ratio*100:.0f}%',\n",
+    "        color='red', ha='center', va='bottom', size='large')\n",
+    "ax.text(vmax, max_value, f'{right_edge_ratio*100:.0f}%',\n",
+    "        color='red', ha='center', va='bottom', size='large')\n",
+    "ax.text(vmax+(vmax-vmin)*0.01, max_value/2, 'Colormap interval',\n",
+    "        color='red', rotation=90, ha='left', va='center', size='x-large')\n",
+    "\n",
+    "ax.set_xlim(vmin-(vmax-vmin)*0.1, vmax+(vmax-vmin)*0.1)\n",
+    "ax.set_ylim(0, max_value*1.1)\n",
+    "pass"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### First memory cell"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig, ax = plt.subplots(num=2, figsize=(15, 15), clear=True, nrows=1, ncols=1)\n",
+    "geom.plot_data_fast(data[0], ax=ax, vmin=vmin, vmax=vmax)\n",
+    "pass"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Train average"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-11-13T18:24:57.547563Z",
+     "start_time": "2018-11-13T18:24:56.995005Z"
+    },
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "fig, ax = plt.subplots(num=3, figsize=(15, 15), clear=True, nrows=1, ncols=1)\n",
+    "geom.plot_data_fast(data.mean(axis=0), ax=ax, vmin=vmin, vmax=vmax)\n",
+    "pass"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Lowest gain stage per pixel"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "highest_gain_stage = det['image.gain'].ndarray().max(axis=0)\n",
+    "\n",
+    "fig, ax = plt.subplots(num=4, figsize=(15, 15), clear=True, nrows=1, ncols=1)\n",
+    "p = geom.plot_data_fast(highest_gain_stage, ax=ax, vmin=0, vmax=2);\n",
+    "\n",
+    "cb = ax.images[0].colorbar\n",
+    "cb.set_ticks([0, 1, 2])\n",
+    "cb.set_ticklabels(['High gain', 'Medium gain', 'Low gain'])"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Offline Cal",
+   "language": "python",
+   "name": "offline-cal"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebooks/LPDMini/LPD_Mini_Inject_calibration_constants_from_h5files.ipynb b/notebooks/LPDMini/LPD_Mini_Inject_calibration_constants_from_h5files.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..36284a0f74f4f12c306c98d6d3066b07bba4c1fb
--- /dev/null
+++ b/notebooks/LPDMini/LPD_Mini_Inject_calibration_constants_from_h5files.ipynb
@@ -0,0 +1,256 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Injecting LPD-Mini calibration constant data to the database\n",
+    "\n",
+    "Author: European XFEL Detector Group, Version: 1.0\n",
+    "\n",
+    "This notebook is used to read HDF5 files with LPDMINI constants to inject into the CALCAT database.\n",
+    "\n",
+    "The notebook expects an explicit filename format: <calibration_constant_name>_<karabo_da>.h5 for example `RelativeGain_LPDMINI00_2.h5`\n",
+    "  - <calibration_constant_name>: The calibration name as it is named in CALCAT.\n",
+    "  - <karabo_da>: The data aggregator name. As LPDMini aggregators have `/` we replace it with `_`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# calibration constant parameters:\n",
+    "constant_names = [\"\"]  # calibration constant name, required.\n",
+    "in_folder = \"\"  # calibration constants folder, required.\n",
+    "out_folder = \"\"  # output folder to store report path in case the notebook is executed by CLI, required.\n",
+    "proposal = \"\"  # Add proposal number to be sent to the database as a part of Raw data location.\n",
+    "runs = [\"\"]  # Add list of runs to be sent to the database as a part of Raw data location.\n",
+    "\n",
+    "# detector parameters:\n",
+    "karabo_id = \"FXE_DET_LPD_MINI\"  # detector identifier.\n",
+    "karabo_da = [\"\"]  # karabo data aggregators. default \"all\" for all 8 karabo data aggregator names.\n",
+    "\n",
+    "# calibration database parameters:\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8045\"  # calibration DB zmq address.\n",
+    "\n",
+    "# calibration constant conditions:\n",
+    "memory_cells = 512  # Number of memory cells. Used for constant conditions.\n",
+    "bias_voltage_0 = 250 # bias voltage for minis 1, 3, 5, 7\n",
+    "bias_voltage_1 = 250 # bias voltage for minis 2, 4, 6, 8\n",
+    "capacitor = 5  # capacitor value. Used for constant conditions.\n",
+    "photon_energy = 9.2  # calibration constant photon energy. Used for constant conditions.\n",
+    "creation_time = '2023-05-07T15:10:07'  # creation time for the injected constants. required format '2019-01-20T14:12:06'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import multiprocessing\n",
+    "from datetime import datetime\n",
+    "from logging import warning\n",
+    "from pathlib import Path\n",
+    "from typing import List, Tuple\n",
+    "\n",
+    "import h5py\n",
+    "from cal_tools.calcat_interface import CalCatApi\n",
+    "from cal_tools.restful_config import calibration_client\n",
+    "from cal_tools.tools import get_report, send_to_db\n",
+    "from iCalibrationDB import Conditions, Constants"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pixels_x = 256\n",
+    "pixels_y = 32\n",
+    "\n",
+    "calcat_client = calibration_client()\n",
+    "calcat = CalCatApi(client=calcat_client)\n",
+    "\n",
+    "# Look up PDUs\n",
+    "detector_id = calcat.detector(karabo_id)['id']\n",
+    "pdus_by_da = calcat.physical_detector_units(detector_id, pdu_snapshot_at=creation_time)\n",
+    "\n",
+    "if not karabo_da or karabo_da == [\"\"]:\n",
+    "    karabo_da = sorted(pdus_by_da.keys())\n",
+    "else:\n",
+    "    karabo_da = karabo_da\n",
+    "\n",
+    "# if proposal or runs are given assign file_loc\n",
+    "# for calibration constant versions metadata.\n",
+    "file_loc = \"\"\n",
+    "if proposal:\n",
+    "    file_loc += f\"proposal:{proposal}\"\n",
+    "if runs[0] != \"\":\n",
+    "    file_loc += f\"runs: {runs}\"\n",
+    "\n",
+    "if file_loc == \"\":\n",
+    "    print(\n",
+    "        \"No proposal or runs were given for constant source.\"\n",
+    "        \" No \\\"Raw data location\\\" will be injected with the constants.\\n\"\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def validate_input_paths(\n",
+    "    in_folder: str,\n",
+    "    creation_time: str,\n",
+    "    constant_names: List[str],\n",
+    ") -> Tuple[str, datetime]:\n",
+    "\n",
+    "    # Validate input parameters:\n",
+    "    if not (in_folder):\n",
+    "        raise ValueError(\n",
+    "            \"ERROR: \\\"in_folder\\\" is not given.\"\n",
+    "            \" Please provide the constants input folder.\"\n",
+    "        )\n",
+    "\n",
+    "    c_folder = Path(in_folder)\n",
+    "\n",
+    "    if not c_folder.is_dir():\n",
+    "        raise ValueError(\n",
+    "            f\"ERROR: in_folder {in_folder} directory doesn't exist.\"\n",
+    "        )\n",
+    "\n",
+    "    try:\n",
+    "        creation_time = datetime.strptime(creation_time, '%Y-%m-%dT%H:%M:%S')\n",
+    "    except ValueError:\n",
+    "        raise ValueError(\n",
+    "            \"Incorrect data format, \"\n",
+    "            \"should be YYYY-MM-DDTHH:MM:SS i.e. 2019-01-20T14:12:06\"\n",
+    "        )\n",
+    "\n",
+    "    for constant in constant_names:\n",
+    "        if not hasattr(Constants.LPD, constant):\n",
+    "            raise ValueError(\n",
+    "                f\"ERROR: Constant name \\\"{constant}\\\" is not a known LPD constant. \"\n",
+    "                f\"Available LPD Constants are {[c for c in dir(Constants.LPD) if c[0] != '_']}\"\n",
+    "            )\n",
+    "    return c_folder, creation_time"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def inject_constants(\n",
+    "    constant_name: str,\n",
+    "    mod_da: str,\n",
+    "    physical_unit: str,\n",
+    "):\n",
+    "    mod_num = int(mod_da.split('/')[-1])\n",
+    "    # mod_num is from 1 to 8, so b_v_0 applies to odd numbers\n",
+    "    bias_voltage = bias_voltage_0 if mod_num % 2 == 1 else bias_voltage_1\n",
+    "    # Calibration constants condition object.\n",
+    "    condition = Conditions.Illuminated.LPD(\n",
+    "        memory_cells=memory_cells,\n",
+    "        bias_voltage=bias_voltage,\n",
+    "        photon_energy=photon_energy,\n",
+    "        pixels_x=pixels_x,\n",
+    "        pixels_y=pixels_y,\n",
+    "        capacitor=capacitor,\n",
+    "        category=None,\n",
+    "    )\n",
+    "    constant = getattr(Constants.LPD, constant_name)()\n",
+    "\n",
+    "    cfile = c_folder / f\"{constant_name}_{mod_da.replace('/', '_')}.h5\"\n",
+    "\n",
+    "    if not cfile.exists():\n",
+    "        warning(f\"Constant file {cfile} doesn't exist.\\n\")\n",
+    "        return\n",
+    "\n",
+    "    # load constant data.\n",
+    "    with h5py.File(cfile, \"r\") as f:\n",
+    "        cdata = f[constant_name][()]\n",
+    "\n",
+    "    # Validate for only LPD at the moment.\n",
+    "    if not cdata.shape == (memory_cells, pixels_y, pixels_x, 3):\n",
+    "        raise ValueError(\n",
+    "            f\"ERROR: {const} constant data shape is not as expected.\"\n",
+    "            f\" {cdata.shape} != ({memory_cells}, {pixels_y}, {pixels_x}, 3).\\n\"\n",
+    "        )\n",
+    "    constant.data = cdata\n",
+    "\n",
+    "    send_to_db(\n",
+    "        db_module=physical_unit,\n",
+    "        karabo_id=karabo_id,\n",
+    "        constant=constant,\n",
+    "        condition=condition,\n",
+    "        file_loc=file_loc,\n",
+    "        report_path=report_path,\n",
+    "        cal_db_interface=cal_db_interface,\n",
+    "        creation_time=creation_time,\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "c_folder, creation_time = validate_input_paths(\n",
+    "    in_folder,\n",
+    "    creation_time,\n",
+    "    constant_names,\n",
+    ")\n",
+    "\n",
+    "# create a report path for calibration constant versions metadata.\n",
+    "report_name = f\"No_report/LPD_{datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}\"\n",
+    "report_path = get_report(\n",
+    "    out_folder=in_folder,\n",
+    "    default_path=report_name\n",
+    ")\n",
+    "\n",
+    "\n",
+    "mod_mapping = {mod: pdus_by_da[mod][\"physical_name\"] for mod in karabo_da}\n",
+    "print(\"Physical detector units retrieved are: \", mod_mapping, \"\\n\")\n",
+    "\n",
+    "inp = []\n",
+    "for const in constant_names:\n",
+    "    for k_da, pdu in mod_mapping.items():\n",
+    "        inp.append((const, k_da, pdu))\n",
+    "\n",
+    "with multiprocessing.Pool(processes=5) as pool:\n",
+    "    results = pool.starmap(inject_constants, inp)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/notebooks/REMI/REMI_Digitize_and_Transform.ipynb b/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
index 022f851dbf4260fb0067924ec05bfe83e4db1ef5..5dae80017da0519cea7136740ac5334546911389 100644
--- a/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
+++ b/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
@@ -13,12 +13,6 @@
     "\n",
     "calib_config_path = '/gpfs/exfel/exp/SQS/202101/p002535/usr/config_board2+4.yaml'  # Path to correction and transform configuration\n",
     "\n",
-    "# These parameters are required by xfel-calibrate but ignored in this notebook.\n",
-    "cycle = ''  # Proposal cycle, currently not used.\n",
-    "cal_db_timeout = 0  # Calibration DB timeout, currently not used.\n",
-    "cal_db_interface = 'foo'  # Calibration DB interface, currently not used.\n",
-    "karabo_da = 'bar'  # Karabo data aggregator name, currently not used\n",
-    "\n",
     "# Output parameters.\n",
     "karabo_id = 'SQS_REMI_DLD6'  # Karabo device ID root for virtual output device.\n",
     "proposal = ''  # Proposal, leave empty for auto detection based on in_folder\n",
@@ -26,12 +20,9 @@
     "out_seq_len = 5000  # Number of trains per sequence file in output.\n",
     "det_device_id = '{karabo_id}/DET/{det_name}'  # Karabo device ID for virtual output device.\n",
     "det_output_key = 'output'  # Pipeline name for fast data output.\n",
-    "save_raw_triggers = True  # Whether to save trigger position in files.\n",
-    "save_raw_edges = True  # Whether to save digitized edge positions in files.\n",
-    "save_rec_signals = True  # Whether to save reconstructed signals (u1-w2, mcp) in files.\n",
-    "save_rec_hits = True  # Whether to save reoncstructed hits (x,y,t,m) in files.\n",
     "chunks_triggers = [500]  # HDF chunk size for triggers.\n",
     "chunks_edges = [500, 7, 50]  # HDF chunk size for edges.\n",
+    "chunks_amplitudes = [500, 7, 50]  # HDF chunk size for amplitudes.\n",
     "chunks_hits = [50, 50]  # HDF chunk size for hits.\n",
     "chunks_signals = [50, 50]  # HDF chunk size for signals.\n",
     "dataset_compression = 'gzip'  # HDF compression method.\n",
@@ -44,17 +35,38 @@
     "ppt_source = 'SQS_RR_UTC/TSYS/TIMESERVER:outputBunchPattern'\n",
     "ignore_fel = False  # Ignore any FEL entries in the PPT.\n",
     "ignore_ppl = False  # Ignore any PPL entries in the PPT.\n",
+    "trailing_trigger = False  # Add a trigger after all regular pulses with the remaining trace.\n",
     "ppl_offset = 0  # In units of the PPT.\n",
     "laser_ppt_mask = -1  # Bit mask for used laser, negative to auto-detect from instrument. \n",
-    "instrument_sase = 3\n",
-    "first_pulse_offset = 1000\n",
-    "single_pulse_length = 25000\n",
+    "instrument_sase = 3  # Which SASE we're running at for PPT decoding.\n",
+    "first_pulse_offset = 10000  # Sample position where the first pulse begins, ignored when PPT is reconstructed.\n",
+    "single_pulse_length = 25000  # How many samples if there's only one pulse.\n",
+    "pulse_start_offset = 0  # Signal offset at the start of each pulse.\n",
+    "pulse_end_offset = 0  # Signal offset at the end of each pulse.\n",
+    "\n",
+    "# PPT reconstruction parameters.\n",
+    "reconstruct_ppt = False  # Reconstruct PPT from some trigger edges.\n",
+    "trigger_edge_channel = '4_D'  # Channel to use for triggering.\n",
+    "trigger_edge_offset = 0  # Offset to apply to the first trigger edge position to compute first pulse offset.\n",
+    "fake_ppt_offset = 0  # Offset in reconstructed PPT for pulses.\n",
     "\n",
     "# Parallelization parameters.\n",
     "mp_find_triggers = 0.5  # Parallelization for finding triggers.\n",
     "mp_find_edges = 0.5  # Parallelization for digitizing analog signal.\n",
     "mt_avg_trace = 2  # Parallelization for trace averaging.\n",
-    "mp_rec_hits = 1.0  # Parallelization for hit reconstruction."
+    "mp_rec_hits = 1.0  # Parallelization for hit reconstruction.\n",
+    "\n",
+    "# DEPRECATED AND IGNORED\n",
+    "# Left for compatibility with webservice or legacy configuration.\n",
+    "cycle = ''  # Proposal cycle, passed by webservice but not used.\n",
+    "karabo_da = 'bar'  # Karabo data aggregator name, passed by webservice but not used\n",
+    "cal_db_timeout = 0  # Calibration DB timeout, passed by webservice but not used.\n",
+    "cal_db_interface = 'foo'  # Calibration DB interface, passed by webservice but not used.\n",
+    "save_raw_triggers = True  # Whether to save trigger position in files, ignored and always enabled.\n",
+    "save_raw_edges = True  # Whether to save digitized edge positions in files, ignored and always enabled.\n",
+    "save_raw_amplitudes = True  # Whether to save analog pulse amplitudes in files, ignored and always enabled.\n",
+    "save_rec_signals = True  # Whether to save reconstructed signals (u1-w2, mcp) in files, ignored and always enabled.\n",
+    "save_rec_hits = True  # Whether to save reoncstructed hits (x,y,t,m) in files, ignored and always enabled."
    ]
   },
   {
@@ -63,19 +75,25 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "from datetime import datetime\n",
+    "from logging import warning\n",
+    "from pathlib import Path\n",
+    "import re\n",
+    "\n",
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
     "from matplotlib.colors import LogNorm\n",
+    "from matplotlib.patches import Circle\n",
     "from threadpoolctl import threadpool_limits\n",
     "\n",
-    "import re\n",
+    "import tabulate\n",
+    "from IPython.display import Latex, Markdown, display\n",
+    "\n",
     "import h5py\n",
-    "from pathlib import Path\n",
-    "from datetime import datetime\n",
     "\n",
     "import pasha as psh\n",
     "from euxfel_bunch_pattern import indices_at_sase, indices_at_laser\n",
-    "from extra_data import RunDirectory\n",
+    "from extra_data import RunDirectory, by_id\n",
     "from extra_remi import Analysis, trigger_dt\n",
     "from extra_remi.util import timing\n",
     "from extra_remi.rd_resort import signal_dt, hit_dt\n",
@@ -89,6 +107,18 @@
     "%matplotlib inline"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def finite_flattened_slice(array, slice_=np.s_[:]):\n",
+    "    \"\"\"Return flattened and finite values for a given slice.\"\"\"\n",
+    "    sliced_array = array[slice_]\n",
+    "    return sliced_array[np.isfinite(sliced_array)]"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -107,8 +137,26 @@
     "\n",
     "remi = Analysis(calib_config_path, use_hex=not quad_anode)\n",
     "\n",
+    "# Collect required sources and keys required.\n",
+    "sourcekeys = set()\n",
+    "for det_name in remi['detector'].keys():\n",
+    "    sourcekeys |= remi.get_detector_sourcekeys(det_name)\n",
+    "        \n",
+    "if not reconstruct_ppt:\n",
+    "    sourcekeys.add((ppt_source, 'data.bunchPatternTable'))\n",
+    "\n",
     "with timing('open_run'):\n",
-    "    dc = remi.prepare_dc(RunDirectory(Path(in_folder) / f'r{run:04d}', inc_suspect_trains=True))"
+    "    # Initial opening of input data.\n",
+    "    base_dc = RunDirectory(Path(in_folder) / f'r{run:04d}', inc_suspect_trains=True)\n",
+    "    \n",
+    "with timing('select_data'):\n",
+    "    # Filter down to those trains with data for all required sources.\n",
+    "    filter_run = base_dc.select(sourcekeys, require_all=True)\n",
+    "\n",
+    "# Re-select entire data collection to the trains with data.\n",
+    "dc = base_dc.select_trains(by_id[filter_run.train_ids])\n",
+    "base_dc = None\n",
+    "filter_run = None"
    ]
   },
   {
@@ -118,21 +166,110 @@
     "# Transformation parameters"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Additional parameters through the user-side configuration file for analog channels and detector settings. Parameters that are deprecated and ignored, but present in the file, are excluded."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "def print_leaf(leaf, indent=0):\n",
+    "def print_leaf(leaf, indent=0, ignored_keys={}):\n",
     "    for key, value in leaf.items():\n",
+    "        if key in ignored_keys:\n",
+    "            continue\n",
+    "        \n",
     "        if isinstance(value, dict):\n",
     "            print(indent * 4 * ' ' + key)\n",
-    "            print_leaf(value, indent=indent+1)\n",
+    "            print_leaf(value, indent=indent+1, ignored_keys=ignored_keys)\n",
     "        else:\n",
     "            print(indent * 4 * ' ' + f'{key}: {value}')\n",
-    "        \n",
-    "print_leaf(remi.tree)"
+    "\n",
+    "print(calib_config_path.resolve())\n",
+    "print_leaf(remi.tree, ignored_keys={'instrument', 'trigger'})"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "discr_table = []\n",
+    "\n",
+    "if quad_anode:\n",
+    "    signals = ['u1', 'u2', 'v1', 'v2', 'mcp']\n",
+    "    wire_angles = [np.pi*(3/4), np.pi*(1/4)]\n",
+    "else:\n",
+    "    signals = ['u1', 'u2', 'v1', 'v2', 'w1', 'w2', 'mcp']\n",
+    "    wire_angles = [np.pi*(3/4), np.pi*(3/4+1/3), np.pi*(3/4+2/3)]\n",
+    "    \n",
+    "N = 15\n",
+    "shifts = np.linspace(-0.4, 0.4, N)\n",
+    "\n",
+    "for det_name, cur_det in remi['detector'].items():\n",
+    "    fig = plt.figure(num=f'wiring_{det_name}', figsize=(9, 5))\n",
+    "    fig.text(0.5, 1.0, det_name, ha='center', va='top', size='xx-large')\n",
+    "    ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])\n",
+    "    ax.set_axis_off()\n",
+    "\n",
+    "    ax.add_patch(Circle((0,0), 1, ec='black', fc='none', lw=2))\n",
+    "    ax.set_xlim(-1.5*(9/5), 1.5*(9/5))\n",
+    "    ax.set_ylim(-1.5, 1.5)\n",
+    "\n",
+    "    _, params = remi.get_discriminator(cur_det['channels'])\n",
+    "    discr_header = params[0].keys()\n",
+    "\n",
+    "    for channel_idx in range(len(signals)):\n",
+    "        index = cur_det['indices'].index(channel_idx)\n",
+    "        discr_table.append((det_name, signals[channel_idx],\n",
+    "                            cur_det['channels'][index],\n",
+    "                            remi['digitizer']['discriminator'],\n",
+    "                            *params[index].values()))\n",
+    "\n",
+    "    for j, start_angle in enumerate(wire_angles):\n",
+    "        x1 = np.cos(start_angle+np.pi/4)\n",
+    "        x2 = np.cos(start_angle+5*np.pi/4)\n",
+    "\n",
+    "        y1 = np.sin(start_angle+np.pi/4)\n",
+    "        y2 = np.sin(start_angle+5*np.pi/4)\n",
+    "\n",
+    "        channel = cur_det['channels'][cur_det['indices'].index(2*j)]\n",
+    "        ax.text(x1*1.2, y1*1.2, f'{signals[2*j]}\\n{channel}',\n",
+    "                c=f'C{j}', fontsize='xx-large', va='center', ha='center')\n",
+    "\n",
+    "        channel = cur_det['channels'][cur_det['indices'].index(2*j+1)]\n",
+    "        ax.text(x2*1.2, y2*1.2, f'{signals[2*j+1]}\\n{channel}',\n",
+    "                c=f'C{j}', fontsize='xx-large', va='center', ha='center')\n",
+    "\n",
+    "        for k, shift in enumerate(shifts):\n",
+    "            x1 = np.cos(start_angle+np.pi/4+shifts[k])\n",
+    "            x2 = np.cos(start_angle+5*np.pi/4+shifts[N-k-1])\n",
+    "\n",
+    "            y1 = np.sin(start_angle+np.pi/4+shifts[k])\n",
+    "            y2 = np.sin(start_angle+5*np.pi/4+shifts[N-k-1])\n",
+    "\n",
+    "            ax.plot([x1, x2], [y1, y2], c=f'C{j}')\n",
+    "\n",
+    "    mcp_angle = np.pi/6\n",
+    "    channel = cur_det['channels'][cur_det['indices'].index(6)]\n",
+    "    ax.text(1.4*np.cos(mcp_angle), 1.2*np.sin(mcp_angle), f'mcp\\n{channel}',\n",
+    "            c='k', fontsize='xx-large', va='center', ha='center')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "display(Latex(tabulate.tabulate(\n",
+    "    discr_table, tablefmt='latex', headers=['', '', '', 'discriminator', *discr_header])))"
    ]
   },
   {
@@ -142,6 +279,99 @@
     "# Pulse and trigger information"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Read PPT from file or reconstruct PPT for older data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if reconstruct_ppt:\n",
+    "    # Take up to the first hundred trains for now.\n",
+    "    # Could be done for each train individually, but likely not necessary for now.\n",
+    "    trigger_trace = dc[remi['digitizer']['source'], remi['digitizer']['key_pattern'].format(trigger_edge_channel)] \\\n",
+    "        [:100].ndarray().mean(axis=0).astype(np.float64)\n",
+    "    trigger_trace -= trigger_trace[0]  # Use simple offset correction.\n",
+    "\n",
+    "    fake_ppt = np.zeros(2700, dtype=np.uint32)\n",
+    "    \n",
+    "    discr_func, discr_params = remi.get_discriminator([trigger_edge_channel])\n",
+    "\n",
+    "    edges = np.zeros(1000, dtype=np.float64)\n",
+    "    num_pulses = discr_func(trigger_trace, edges=edges, **discr_params[0])\n",
+    "    edges = edges[:num_pulses]\n",
+    "\n",
+    "    first_edge = edges[0]\n",
+    "    rel_edges = np.round(edges - first_edge)\n",
+    "    edge_diff = rel_edges[1] - rel_edges[0]\n",
+    "\n",
+    "    if not np.allclose(rel_edges[1:] - rel_edges[:-1], edge_diff):\n",
+    "        raise ValueError('PPT reconstruction for unstable edge intervals not supported')\n",
+    "\n",
+    "    pulse_spacing = edge_diff / (2 * remi['digitizer']['clock_factor'])  # In units of PPT\n",
+    "\n",
+    "    if not float.is_integer(pulse_spacing):\n",
+    "        raise ValueError('PPT reconstruction encountered non-integer pulse spacing')\n",
+    "\n",
+    "    pulse_spacing = int(pulse_spacing)\n",
+    "\n",
+    "    # Taken from euxfel_bunch_pattern/__init__.py\n",
+    "    from euxfel_bunch_pattern import DESTINATION_T4D, DESTINATION_T5D, PHOTON_LINE_DEFLECTION\n",
+    "    if instrument_sase == 1:\n",
+    "        flag = DESTINATION_T4D\n",
+    "    elif instrument_sase == 2:\n",
+    "        flag = DESTINATION_T5D\n",
+    "    elif instrument_sase == 3:\n",
+    "        flag = DESTINATION_T4D | PHOTON_LINE_DEFLECTION\n",
+    "\n",
+    "    first_pulse_offset = int(first_edge + trigger_edge_offset)  # Overwrite notebook argument.\n",
+    "    fake_ppt[fake_ppt_offset:fake_ppt_offset + (pulse_spacing * num_pulses):pulse_spacing] = flag\n",
+    "\n",
+    "    from pasha.functor import Functor, gen_split_slices\n",
+    "    class FakeKeyDataFunctor(Functor):\n",
+    "        \"\"\"Functor appearing KeyData-like with constant data.\n",
+    "        \n",
+    "        This functor serves a constant data row for a given number\n",
+    "        of train IDs the same way a KeyData object would.\n",
+    "        \"\"\"\n",
+    "        \n",
+    "        def __init__(self, row, train_ids):\n",
+    "            self.row = row\n",
+    "            self.train_ids = train_ids\n",
+    "        \n",
+    "        def split(self, num_workers):\n",
+    "            return gen_split_slices(len(self.train_ids), n_parts=num_workers)\n",
+    "\n",
+    "        def iterate(self, share):\n",
+    "            it = zip(range(*share.indices(len(self.train_ids))), self.train_ids)\n",
+    "\n",
+    "            for index, train_id in it:\n",
+    "                yield index, train_id, self.row\n",
+    "    \n",
+    "    ppt_data = FakeKeyDataFunctor(fake_ppt, dc.train_ids)\n",
+    "    \n",
+    "    fig, ax = plt.subplots(num='reconstructed_ppt_triggers', figsize=(9, 6), clear=True, ncols=1, nrows=1)\n",
+    "\n",
+    "    ax.set_title('Edge trigger signal')\n",
+    "    ax.plot(trigger_trace, lw=1, label=f'Mean {trigger_edge_channel} trace')\n",
+    "    ax.vlines(edges, trigger_trace.min()*1.1, trigger_trace.max()*1.1,\n",
+    "              color='red', linewidth=3, alpha=0.3, label='Edge positions')\n",
+    "    \n",
+    "    ax.set_xlabel('Samples')\n",
+    "    ax.set_ylabel('Intensity / ADU')\n",
+    "    ax.legend()\n",
+    "    \n",
+    "else:\n",
+    "    ppt_data = dc[ppt_source, 'data.bunchPatternTable']\n",
+    "    print(f'Pulse pattern entries for {(ppt_data.data_counts() > 0).sum()} trains')"
+   ]
+  },
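+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick, purely illustrative sanity check, the reconstructed PPT can be decoded again by testing its destination flag bits; the recovered slots should reproduce the pulse spacing derived from the trigger edges above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if reconstruct_ppt:\n",
+    "    # Slots with the destination flag set mark reconstructed pulses.\n",
+    "    recovered_pos = np.flatnonzero(fake_ppt & flag)\n",
+    "    print(f'Recovered {len(recovered_pos)} pulses, '\n",
+    "          f'spacing {np.unique(np.diff(recovered_pos))}')"
+   ]
+  },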
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -161,8 +391,6 @@
     "# * `pulse_offsets [int32: len(dc.train_ids)]` containing the global offset for the first pulse of each train.\n",
     "# * `num_pulses = pulse_counts.sum(axis=0)`\n",
     "\n",
-    "ppt_data = dc[ppt_source, 'data.bunchPatternTable']\n",
-    "\n",
     "def get_pulse_positions(ppt, sase, laser, ppl_offset):\n",
     "    # Combine FEL and PPL positions.\n",
     "\n",
@@ -201,6 +429,11 @@
     "\n",
     "    # Fill any missing values with the highest.\n",
     "    pulse_counts[has_ppt == False] = pulse_counts.max()\n",
+    "    \n",
+    "    if trailing_trigger:\n",
+    "        # Add a single count to every train for trailing trigger.\n",
+    "        warning('Trailing trigger active, all pulse counts are one higher than expected')\n",
+    "        pulse_counts += 1\n",
     "\n",
     "    # Compute offsets based on pulse counts.\n",
     "    pulse_offsets = np.zeros_like(pulse_counts)\n",
@@ -218,7 +451,7 @@
    },
    "outputs": [],
    "source": [
-    "fig, ax = plt.subplots(num=1, ncols=1, nrows=1, figsize=(9, 4), clear=True)\n",
+    "fig, ax = plt.subplots(num='pulse_counts', ncols=1, nrows=1, figsize=(9, 4), clear=True)\n",
     "\n",
     "ax.set_title('Pulse count')\n",
     "ax.plot(dc.train_ids, pulse_counts, lw=1)\n",
@@ -233,9 +466,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Find triggers\n",
-    "\n",
-    "The trigger defines the boundary of a pulse on the digitizer trace, which is stored per train."
+    "### Find triggers"
    ]
   },
   {
@@ -260,42 +491,57 @@
     "\n",
     "clock_factor = remi['digitizer']['clock_factor']\n",
     "\n",
+    "min_trace_len = min([\n",
+    "    dc[src, key].entry_shape[0] for det_name in remi['detector'].keys()\n",
+    "    for src, key in remi.get_detector_sourcekeys(det_name)\n",
+    "])\n",
+    "\n",
     "def trigger_by_ppt(worker_id, index, train_id, ppt):\n",
     "    all_pos, fel_pos, ppl_pos = get_pulse_positions(ppt, instrument_sase, laser_ppt_mask, ppl_offset)\n",
     "    num_pulses = len(all_pos)\n",
     "    \n",
-    "    if num_pulses == 0:\n",
-    "        return\n",
-    "    elif len(ppl_pos) == 0 and ppl_offset < 0:\n",
-    "        # No PPL pulses, but a negative offset is configured. This will cause\n",
-    "        # first_pulse_offset to start early and most likely miss pulses at the\n",
-    "        # end, so we correct by adding the ppl_offset to relative positions\n",
-    "        # when computing trace positions.\n",
-    "        pos_corr = abs(ppl_offset)\n",
-    "    else:\n",
-    "        pos_corr = 0\n",
-    "        \n",
+    "    if num_pulses > 0:\n",
+    "        if len(ppl_pos) == 0 and ppl_offset < 0:\n",
+    "            # No PPL pulses, but a negative offset is configured. This will cause\n",
+    "            # first_pulse_offset to start early and most likely miss pulses at the\n",
+    "            # end, so we correct by adding the ppl_offset to relative positions\n",
+    "            # when computing trace positions.\n",
+    "            pos_corr = abs(ppl_offset)\n",
+    "        else:\n",
+    "            pos_corr = 0\n",
     "\n",
-    "    rel_pos = all_pos - all_pos[0]\n",
+    "        rel_pos = all_pos - all_pos[0]\n",
     "\n",
-    "    if num_pulses > 1:\n",
-    "        pulse_len = np.unique(rel_pos[1:] - rel_pos[:-1]).min()\n",
-    "    elif num_pulses == 1:\n",
-    "        pulse_len = single_pulse_length\n",
+    "        if num_pulses > 1:\n",
+    "            pulse_len = np.unique(rel_pos[1:] - rel_pos[:-1]).min()\n",
+    "        elif num_pulses == 1:\n",
+    "            pulse_len = single_pulse_length\n",
     "\n",
-    "    start_frac = first_pulse_offset + (rel_pos + pos_corr) * 2 * clock_factor\n",
-    "    start_int = start_frac.astype(int)\n",
+    "        start_frac = first_pulse_offset + (rel_pos + pos_corr) * 2 * clock_factor\n",
+    "        start_int = start_frac.astype(int)\n",
     "\n",
-    "    pulse_offset = pulse_offsets[index]\n",
-    "    pulse_count = pulse_counts[index]\n",
+    "        train_triggers = triggers[pulse_offsets[index]:int(pulse_offsets[index]+num_pulses)]\n",
+    "        train_triggers['start'] = start_int + pulse_start_offset\n",
+    "        train_triggers['stop'] = start_int + int(pulse_len * 2 * clock_factor) - 1 + pulse_end_offset\n",
+    "        train_triggers['offset'] = start_frac - start_int\n",
+    "        train_triggers['pulse'] = all_pos.astype(np.int16)\n",
+    "        train_triggers['fel'] = [pos in fel_pos for pos in all_pos]\n",
+    "        train_triggers['ppl'] = [pos in ppl_pos for pos in all_pos]\n",
+    "        \n",
+    "        last_sample = train_triggers['stop'].max()\n",
     "        \n",
-    "    train_triggers = triggers[pulse_offset:pulse_offset+pulse_count]\n",
-    "    train_triggers['start'] = start_int\n",
-    "    train_triggers['stop'] = start_int + int(pulse_len * 2 * clock_factor) - 1\n",
-    "    train_triggers['offset'] = start_frac - start_int\n",
-    "    train_triggers['pulse'] = all_pos.astype(np.int16)\n",
-    "    train_triggers['fel'] = [pos in fel_pos for pos in all_pos]\n",
-    "    train_triggers['ppl'] = [pos in ppl_pos for pos in all_pos]\n",
+    "    else:\n",
+    "        last_sample = first_pulse_offset\n",
+    "        \n",
+    "    if trailing_trigger:\n",
+    "        # Add trailing trigger if required.\n",
+    "        trigger = triggers[int(pulse_offsets[index]+pulse_counts[index]-1)]\n",
+    "        trigger['start'] = last_sample\n",
+    "        trigger['stop'] = min_trace_len\n",
+    "        trigger['offset'] = 0.0\n",
+    "        trigger['pulse'] = -1\n",
+    "        trigger['fel'] = False\n",
+    "        trigger['ppl'] = False\n",
     "\n",
     "with timing('find_triggers'):\n",
     "    psh.map(trigger_by_ppt, ppt_data)\n",
@@ -306,11 +552,13 @@
     "    # with each train being split properly.\n",
     "    # If there's more than one delta in a single train, this likely points to a mismatch\n",
     "    # of FEL and PPL repetition rate. This is most likely not intended.\n",
-    "    \n",
+    "\n",
     "    one = np.uint64(1)  # Because np.uint64 + int = np.float64\n",
     "    pulse_deltas = set()\n",
     "\n",
-    "    for pulse_id, (offset, count) in enumerate(zip(pulse_offsets, pulse_counts)):\n",
+    "    for pulse_id, (offset, count) in enumerate(zip(\n",
+    "        pulse_offsets, pulse_counts - one if trailing_trigger else pulse_counts\n",
+    "    )):\n",
     "        deltas = triggers['pulse'][offset+one:offset+count] - triggers['pulse'][offset:offset+count-one]\n",
     "\n",
     "        if len(np.unique(deltas)) > 1:\n",
@@ -319,10 +567,8 @@
     "\n",
     "    if len(pulse_deltas) > 1:\n",
     "        delta_str = ', '.join([str(x) for x in sorted(pulse_deltas)])\n",
-    "        print(f'WARNING: Different pulse lengths (PPT: {delta_str}) encountered within single trains, '\n",
-    "              f'separated pulse spectra may split up signals!')\n",
-    "    else:\n",
-    "        print('WARNING: Different pulse lengths encountered across trains, separation may be unstable!')"
+    "        warning(f'Different pulse lengths (PPT: {delta_str}) encountered within single trains, '\n",
+    "                f'separated pulse spectra may split up signals!')"
    ]
   },
   {
@@ -331,7 +577,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "fig, (lx, rx) = plt.subplots(num=2, ncols=2, nrows=1, figsize=(9, 4), clear=True,\n",
+    "fig, (lx, rx) = plt.subplots(num='trigger_positions', ncols=2, nrows=1, figsize=(9, 4), clear=True,\n",
     "                             gridspec_kw=dict(top=0.75))\n",
     "\n",
     "# Display ~400 pulses or 10 trains, whatever is lower\n",
@@ -376,6 +622,13 @@
     "pass"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The trigger defines the boundary of each pulse on the digitizer trace acquired by train. The starting position in samples of each found trigger is shown for the first few trains in detail on the left and all trains on the right."
+   ]
+  },
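+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of how these trigger records are meant to be used: the integer `start`/`stop` samples slice the digitizer trace, while `offset` keeps the sub-sample remainder for the later time calibration."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative only: inspect the boundaries of the very first trigger.\n",
+    "first_trigger = triggers[0]\n",
+    "print(f\"Pulse {first_trigger['pulse']}: samples \"\n",
+    "      f\"{first_trigger['start']}:{first_trigger['stop']}, \"\n",
+    "      f\"fractional offset {first_trigger['offset']:.3f}\")"
+   ]
+  },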
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -401,8 +654,7 @@
     "psh.set_default_context('processes', num_workers=remi.get_num_workers(mp_find_edges))\n",
     "threadpool_limits(limits=remi.get_num_workers(mt_avg_trace))\n",
     "\n",
-    "edges_by_det = {}\n",
-    "avg_traces_by_det = {}\n",
+    "det_data = {}\n",
     "\n",
     "for det_name, det in remi['detector'].items():\n",
     "    det_sourcekeys = remi.get_detector_sourcekeys(det_name)\n",
@@ -411,6 +663,8 @@
     "    \n",
     "    edges = psh.alloc(shape=(num_pulses, 7, det['max_hits']),\n",
     "                      dtype=np.float64, fill=np.nan)\n",
+    "    amplitudes = psh.alloc(shape=(num_pulses, 7, det['max_hits']),\n",
+    "                           dtype=np.float64, fill=np.nan)\n",
     "    avg_traces = psh.alloc_per_worker(shape=(7, trace_len), dtype=np.float64)\n",
     "    \n",
     "    def prepare_edge_worker(worker_id):\n",
@@ -420,7 +674,8 @@
     "        source_name = remi['digitizer']['source']\n",
     "        bl_start, bl_stop, _ = remi.get_baseline_limits(trace_len)\n",
     "        bl_sym = remi['digitizer']['baseline_symmetry']\n",
-    "        time_cal = remi.get_time_calibration()\n",
+    "        \n",
+    "        time_cal = 1e9 / (2 * remi['digitizer']['clock_factor'] * (1.3e9 / 288))\n",
     "        \n",
     "        traces_corr = np.empty((7, trace_len), dtype=np.float64)\n",
     "        baselines = np.empty(bl_sym, dtype=np.float64)\n",
@@ -441,23 +696,81 @@
     "\n",
     "        pulses_slice = np.s_[pulse_offsets[index]:pulse_offsets[index]+pulse_counts[index]]\n",
     "\n",
-    "        for trigger, pulse_edges in zip(triggers[pulses_slice], edges[pulses_slice]):\n",
+    "        for trigger, pulse_edges, pulse_amplitudes in zip(\n",
+    "            triggers[pulses_slice], edges[pulses_slice], amplitudes[pulses_slice]\n",
+    "        ):\n",
     "            trigger_slice = np.s_[trigger['start']:trigger['stop']]\n",
     "                                                 \n",
-    "            for trace, channel_params, channel_edges in zip(traces_corr, discr_params, pulse_edges):\n",
-    "                discr_func(trace[trigger_slice], channel_edges, **channel_params)\n",
-    "\n",
-    "            pulse_edges += trigger['offset']\n",
-    "            pulse_edges *= time_cal\n",
+    "            for trace, channel_params, channel_edges, channel_amplitudes in zip(\n",
+    "                traces_corr, discr_params, pulse_edges, pulse_amplitudes\n",
+    "            ):\n",
+    "                discr_func(trace[trigger_slice], edges=channel_edges,\n",
+    "                           amplitudes=channel_amplitudes, **channel_params)\n",
+    "\n",
+    "            if np.isfinite(pulse_edges).sum(axis=1).max() == det['max_hits']:\n",
+    "                warning(f'Maximum number of edges reached in train {train_id}, pulse: {trigger[\"pulse\"]}')\n",
     "            \n",
     "    with timing(f'find_edges, {det_name}'):\n",
     "        psh.map(find_edges, dc.select(det_sourcekeys))\n",
     "    \n",
-    "    edges_by_det[det_name] = edges\n",
-    "    avg_traces_by_det[det_name] = avg_traces.sum(axis=0) / len(dc.train_ids)\n",
+    "    if not np.isfinite(edges).any():\n",
+    "        warning(f'No edges found for {det_name}')\n",
+    "    \n",
+    "    fig, (ux, bx) = plt.subplots(num=f'digitize_result_{det_name}', ncols=1, nrows=2, figsize=(9.5, 8), clear=True,\n",
+    "                                 gridspec_kw=dict(left=0.1, right=0.98, top=0.98, bottom=0.1, hspace=0.25))\n",
     "    \n",
-    "    with np.printoptions(precision=2, suppress=True):\n",
-    "        print(edges[:5, :, :8])"
+    "    fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n",
+    "    \n",
+    "    max_num = 0\n",
+    "\n",
+    "    for edge_idx, edge_name in enumerate(['u1', 'u2', 'v1', 'v2', 'w1', 'w2', 'mcp']):\n",
+    "        n, _, _ = ux.hist(finite_flattened_slice(amplitudes, np.s_[:, edge_idx, :]),\n",
+    "                          bins=1000, range=(0, 4096), histtype='step', lw=1,\n",
+    "                          color=f'C{edge_idx}' if edge_idx < 6 else 'k', label=edge_name)\n",
+    "        max_num = max(max_num, n.max())\n",
+    "        \n",
+    "        cur_edges = finite_flattened_slice(edges, np.s_[:, edge_idx, :])\n",
+    "        bx.hist(cur_edges - np.floor(cur_edges), bins=500, range=(0, 1), histtype='step',\n",
+    "                lw=1, color=f'C{edge_idx}' if edge_idx < 6 else 'k', label=edge_name)\n",
+    "    \n",
+    "    ux.legend()\n",
+    "    ux.set_title('Pulse height distributions')\n",
+    "    ux.set_xlabel('Pulse height')\n",
+    "    ux.set_yscale('log')\n",
+    "    ux.set_xlim(0, 4096)\n",
+    "    ux.set_ylim(10, 1.5*max(max_num, 10))\n",
+    "    \n",
+    "    if remi['digitizer']['discriminator'] == 'cfd':\n",
+    "        ux.text(1024, 12.5, 'No pulse height feedback for constant fraction discrimination',\n",
+    "                ha='center', va='center')\n",
+    "    \n",
+    "    bx.set_title('Fractional edge distributions')\n",
+    "    bx.set_xlabel('Edge positions - ⌊edge positions⌋')\n",
+    "    bx.set_yscale('log')\n",
+    "    bx.set_xlim(-0.05, 1.2)\n",
+    "    bx.legend()\n",
+    "    \n",
+    "    # Properly offset edges to their trigger offset and convert to time.\n",
+    "    # This is not done earlier to preserve the information for plotting.\n",
+    "    edges += triggers['offset'][:, None, None]\n",
+    "    edges *= remi.get_time_calibration()\n",
+    "    \n",
+    "    det_data[det_name] = {\n",
+    "        'edges': edges,\n",
+    "        'amplitudes': amplitudes,\n",
+    "        'avg_trace': avg_traces.sum(axis=0) / len(dc.train_ids)\n",
+    "    }"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The analog signal is digitized into discrete edges using a fast timing discriminator. The result of this operation is available in files in the `raw.triggers` dataset.\n",
+    "\n",
+    "The pulse height distribution is an integral view about the chosen digitization thresholds. For more detail, please refer to the spectral pulse height distributions further below.\n",
+    "\n",
+    "The fractional edge distribution visualizes the interpolated component of edge positions, i.e. between discrete digitizer samples. This should in general be flat, in particular a convex shape indicates poor interpolation due to too fast rise times."
    ]
   },
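+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For reference, the `finite_flattened_slice` helper used in the plots above is assumed to flatten a slice of an array and drop all non-finite entries. A minimal equivalent sketch (not the actual implementation):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def finite_flattened_slice_sketch(data, sel=np.s_[:]):\n",
+    "    \"\"\"Illustrative stand-in for the finite_flattened_slice helper.\"\"\"\n",
+    "    flat = data[sel].ravel()\n",
+    "    return flat[np.isfinite(flat)]"
+   ]
+  },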
   {
@@ -473,13 +786,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "for i, det_name in enumerate(remi['detector'].keys()):\n",
-    "    fig, axs = plt.subplots(num=10+i, nrows=7, figsize=(9.5, 8), clear=True,\n",
+    "for det_name in remi['detector'].keys():\n",
+    "    fig, axs = plt.subplots(num=f'global_average_{det_name}', nrows=7, figsize=(9.5, 8), clear=True,\n",
     "                            gridspec_kw=dict(left=0.1, right=0.98, top=0.98, bottom=0.1))\n",
     "    fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n",
     "\n",
     "    for edge_idx, edge_name in enumerate(['u1', 'u2', 'v1', 'v2', 'w1', 'w2', 'mcp']):\n",
-    "        axs[edge_idx].plot(avg_traces_by_det[det_name][edge_idx], lw=1)\n",
+    "        axs[edge_idx].plot(det_data[det_name]['avg_trace'][edge_idx], lw=1)\n",
     "        axs[edge_idx].tick_params(labelbottom=False)\n",
     "        axs[edge_idx].set_ylabel(edge_name)\n",
     "    \n",
@@ -491,7 +804,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Sample for digitized traces"
+    "### Sample for found edges"
    ]
   },
   {
@@ -500,10 +813,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "for i, det_name in enumerate(remi['detector'].keys()):\n",
-    "    edges = edges_by_det[det_name]\n",
+    "for det_name in remi['detector'].keys():\n",
+    "    edges = det_data[det_name]['edges']\n",
     "    \n",
-    "    fig = plt.figure(num=100+i, figsize=(9.5, 8))\n",
+    "    fig = plt.figure(num=f'edge_samples_{det_name}', figsize=(9.5, 8))\n",
     "    grid = fig.add_gridspec(ncols=2, nrows=4, left=0.1, right=0.98, top=0.98, bottom=0.1)\n",
     "\n",
     "    fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n",
@@ -515,6 +828,7 @@
     "        \n",
     "        finite_edges = np.isfinite(edges[:, signal_idx, 0])\n",
     "        if not finite_edges.any():\n",
+    "            warning(f'No edges found for {det_name}/{signal_name}')\n",
     "            continue\n",
     "            \n",
     "        pulse_idx = np.uint64(finite_edges.nonzero()[0][0])  # Is combined with other uint64 values below.\n",
@@ -563,13 +877,14 @@
    },
    "outputs": [],
    "source": [
-    "for i, det_name in enumerate(remi['detector'].keys()):\n",
-    "    fig = plt.figure(num=20+i, figsize=(9.5, 6))\n",
+    "for det_name in remi['detector'].keys():\n",
+    "    fig = plt.figure(num=f'digitized_spectra_{det_name}', figsize=(9.5, 6))\n",
     "    \n",
-    "    edges = edges_by_det[det_name]\n",
+    "    edges = det_data[det_name]['edges']\n",
+    "    amplitudes = det_data[det_name]['amplitudes']\n",
     "    \n",
-    "    min_edge = edges[np.isfinite(edges)].min()\n",
-    "    max_edge = edges[np.isfinite(edges)].max()\n",
+    "    min_edge = np.nanmin(edges)\n",
+    "    max_edge = np.nanmax(edges)\n",
     "\n",
     "    grid = fig.add_gridspec(ncols=3, nrows=3, left=0.08, right=0.98, top=0.95, hspace=0.4)\n",
     "\n",
@@ -584,9 +899,23 @@
     "    hist_axs = []\n",
     "\n",
     "    for edge_idx, edge_name in enumerate(['u1', 'u2', 'v1', 'v2', 'w1', 'w2', 'mcp']):\n",
+    "        if edge_idx < 6:\n",
+    "            row = 1 + edge_idx % 2\n",
+    "            col = edge_idx // 2\n",
+    "        else:\n",
+    "            row = 0\n",
+    "            col = np.s_[1:3]\n",
+    "\n",
+    "        ax = fig.add_subplot(grid[row, col])\n",
+    "        ax.set_title(f'TOF spectrum: {edge_name}')\n",
+    "        \n",
     "        num_edges = np.isfinite(edges[:, edge_idx, :]).sum(axis=1)\n",
     "        num_edges = num_edges[:((len(num_edges) // agg_window) * agg_window)]\n",
     "        num_edges = num_edges.reshape(-1, agg_window).mean(axis=1)\n",
+    "        \n",
+    "        if (num_edges == 0).all():\n",
+    "            warning(f'No edges found for {det_name}/{edge_name}')\n",
+    "            continue\n",
     "\n",
     "        if edge_idx < 6:\n",
     "            plot_kwargs = dict(c=f'C{edge_idx}', ls='solid', lw=1.0)\n",
@@ -596,19 +925,9 @@
     "        numx.plot(np.arange(len(num_edges)) * agg_window, num_edges, label=edge_name, **plot_kwargs)\n",
     "        max_num_edges = max(max_num_edges, num_edges.max())\n",
     "\n",
-    "        cur_edges = edges[:, edge_idx, :].flatten()\n",
-    "\n",
-    "        if edge_idx < 6:\n",
-    "            row = 1 + edge_idx % 2\n",
-    "            col = edge_idx // 2\n",
-    "        else:\n",
-    "            row = 0\n",
-    "            col = np.s_[1:3]\n",
-    "\n",
-    "        ax = fig.add_subplot(grid[row, col])\n",
-    "        ax.set_title(f'TOF spectrum: {edge_name}')\n",
-    "        y, _, _ = ax.hist(cur_edges[np.isfinite(cur_edges)], bins=int((max_edge - min_edge) // 5),\n",
-    "                          range=(min_edge, max_edge), color=plot_kwargs['c'], histtype='step', linewidth=1)\n",
+    "        y, _, _ = ax.hist(finite_flattened_slice(edges, np.s_[:, edge_idx, :]),\n",
+    "                          bins=int((max_edge - min_edge) // 5), range=(min_edge, max_edge),\n",
+    "                          color=plot_kwargs['c'], histtype='step', linewidth=1)\n",
     "        hist_axs.append(ax)\n",
     "\n",
     "        max_spectral_intensity = max(max_spectral_intensity, y.max())\n",
@@ -622,6 +941,63 @@
     "pass"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Spectral pulse height distributions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for det_name in remi['detector'].keys():\n",
+    "    fig = plt.figure(num=f'spectral_pulse_heights_{det_name}', figsize=(9.5, 12.0))\n",
+    "    grid = fig.add_gridspec(ncols=2, nrows=4, left=0.08, right=0.98, top=0.95, hspace=0.3)\n",
+    "    fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n",
+    "    \n",
+    "    edges = det_data[det_name]['edges']\n",
+    "    amplitudes = det_data[det_name]['amplitudes']\n",
+    "    \n",
+    "    min_edge = np.nanmin(edges)\n",
+    "    max_edge = np.nanmax(edges)\n",
+    "    \n",
+    "    max_amplitude = np.nanmax(amplitudes)\n",
+    "\n",
+    "    for edge_idx, edge_name in enumerate(['u1', 'u2', 'v1', 'v2', 'w1', 'w2', 'mcp']):\n",
+    "        if edge_idx < 6:\n",
+    "            row = 1 + edge_idx // 2\n",
+    "            col = edge_idx % 2\n",
+    "            tof_bins = int((max_edge - min_edge) // 20)\n",
+    "        else:\n",
+    "            row = 0\n",
+    "            col = np.s_[:]\n",
+    "            tof_bins = int((max_edge - min_edge) // 10)\n",
+    "\n",
+    "        ax = fig.add_subplot(grid[row, col])\n",
+    "        ax.set_title(f'Spectral pulse amplitudes: {edge_name}')\n",
+    "\n",
+    "        flat_edges = finite_flattened_slice(edges, np.s_[:, edge_idx, :])\n",
+    "        flat_amplitudes = finite_flattened_slice(amplitudes, np.s_[:, edge_idx, :])\n",
+    "        ax.hist2d(flat_edges, flat_amplitudes,\n",
+    "                  bins=[tof_bins, 512], norm=LogNorm(),\n",
+    "                  range=[[min_edge, max_edge], [0, max_amplitude]])\n",
+    "        \n",
+    "        if edge_idx == 6:\n",
+    "            ax.set_ylabel('Pulse height')\n",
+    "    pass"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A more detailed view into the distribution of pulse heights as a function of TOF, e.g. to indicate whether the spectrometer transmission may depend on the kinetic energy and/or (in the case of ions) mass."
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -637,8 +1013,8 @@
    },
    "outputs": [],
    "source": [
-    "for i, det_name in enumerate(remi['detector'].keys()):\n",
-    "    edges = edges_by_det[det_name]\n",
+    "for det_name in remi['detector'].keys():\n",
+    "    edges = det_data[det_name]['edges']\n",
     "    \n",
     "    sort = remi.get_dld_sorter(det_name)\n",
     "    \n",
@@ -647,9 +1023,14 @@
     "    is_valid = remi.get_presort_mask(edges, edge_idx=0, w=not quad_anode,\n",
     "                                     sum_limit=max(sort.uncorrected_time_sum_half_widths),\n",
     "                                     sum_shifts=sum_shifts)\n",
+    "    \n",
+    "    if not is_valid.any():\n",
+    "        warning(f'No valid preliminary edge combinations found for {det_name}')\n",
+    "    \n",
     "    signals, sums = remi.get_signals_and_sums(edges, indices=sort.channel_indices, sum_shifts=sum_shifts,\n",
     "                                              mask=is_valid)\n",
-    "    fig = plot_detector_diagnostics(signals=signals, sums=sums, fig_num=30+i, im_scale=1.5,\n",
+    "    fig = plot_detector_diagnostics(signals=signals, sums=sums,\n",
+    "                                    fig_num=f'diagnostics_{det_name}', im_scale=1.5,\n",
     "                                    sum_range=max(sort.uncorrected_time_sum_half_widths),\n",
     "                                    sorter=sort)\n",
     "    fig.text(0.02, 0.98, det_name.upper() + ' before corrections', rotation=90, ha='left', va='top', size='x-large')\n",
@@ -660,13 +1041,27 @@
     "        sums = np.full((n_masked, 3), np.nan, dtype=np.float64)\n",
     "\n",
     "        sort.correct(edges[is_valid], signals, sums)\n",
-    "        fig = plot_detector_diagnostics(signals=signals, sums=sums, fig_num=40+i, im_scale=1.5,\n",
+    "        fig = plot_detector_diagnostics(signals=signals, sums=sums,\n",
+    "                                        fig_num=f'corr_diagnostics_{det_name}', im_scale=1.5,\n",
     "                                        sum_range=max(sort.uncorrected_time_sum_half_widths),\n",
     "                                        sorter=sort)\n",
     "        fig.text(0.02, 0.98, det_name.upper() + ' after corrections', rotation=90, ha='left', va='top', size='x-large')\n",
     "pass"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Overview of initial detector signal correlations before actual hit reconstruction takes place. Only the firsts edge on each channel occuring for each trigger is included, if their times are compatible with a rough time sum window.\n",
+    "\n",
+    "* The top row contains the spectrum of time differences on each wire in temporal coordinates on the left and spatial coordinates on the right (according to configured scale factors).\n",
+    "* The middle row depicts time sums, first integrated and then as a function of time difference. The time sum should generally be somewhat constant, a spectrum-like appearance indicates wire ends have been swapped entirely.\n",
+    "* [HEX-only] The bottom row shows the detector image for each combination of wires based on this limited dataset. There should be no deformations or rotations in any of the wire pairs, else likely channels are misassigned.\n",
+    "\n",
+    "The plot occurs twice if signal-level corrections for time sum or position are enabled."
+   ]
+  },
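+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of the time-sum condition checked above: for a true hit, the two signal times on a wire relative to the MCP time add up to a constant. Assuming the channel order `u1, u2, v1, v2, w1, w2, mcp` used throughout this notebook and considering only the first edge per channel:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative only, using whichever detector was processed last above.\n",
+    "u1, u2, mcp = (det_data[det_name]['edges'][:, idx, 0] for idx in (0, 1, 6))\n",
+    "time_sum_u = u1 + u2 - 2 * mcp  # Should cluster around a constant value.\n",
+    "print(f'Median u time sum: {np.nanmedian(time_sum_u):.1f} ns')"
+   ]
+  },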
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -682,12 +1077,8 @@
    "source": [
     "psh.set_default_context('processes', num_workers=remi.get_num_workers(mp_rec_hits))\n",
     "\n",
-    "signals_by_det = {}\n",
-    "hits_by_det = {}\n",
-    "hit_counts_by_det = {}\n",
-    "\n",
     "for det_name, det in remi['detector'].items():\n",
-    "    edges = edges_by_det[det_name]\n",
+    "    edges = det_data[det_name]['edges']\n",
     "    \n",
     "    signals = psh.alloc(shape=(num_pulses, 50), dtype=signal_dt, fill=np.nan)\n",
     "    hits = psh.alloc(shape=(num_pulses, 50), dtype=hit_dt, fill=(np.nan, np.nan, np.nan, -1))\n",
@@ -705,9 +1096,7 @@
     "    with timing(f'rec_hits, {det_name}'):\n",
     "        psh.map(reconstruct_hits, dc.train_ids)\n",
     "        \n",
-    "    signals_by_det[det_name] = signals\n",
-    "    hits_by_det[det_name] = hits\n",
-    "    hit_counts_by_det[det_name] = hit_counts"
+    "    det_data[det_name].update(signals=signals, hits=hits, hit_counts=hit_counts)"
    ]
   },
   {
@@ -716,15 +1105,15 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "fig, ax = plt.subplots(num=50+i, figsize=(9.5, 4), ncols=1, clear=True,\n",
+    "fig, ax = plt.subplots(num='hit_count_per_trigger', figsize=(9.5, 4), ncols=1, clear=True,\n",
     "                       gridspec_kw=dict(top=0.92, right=0.98, left=0.05, bottom=0.12))\n",
     "    \n",
     "max_num_hits = 0.0\n",
     "    \n",
     "for det_name in remi['detector'].keys():\n",
-    "    agg_window = num_pulses // 1000\n",
+    "    agg_window = num_pulses // min(1000, num_pulses)\n",
     "    \n",
-    "    num_hits = np.isfinite(hits_by_det[det_name]['x']).sum(axis=1)\n",
+    "    num_hits = np.isfinite(det_data[det_name]['hits']['x']).sum(axis=1)\n",
     "    num_hits = num_hits[:(len(num_hits) // agg_window) * agg_window]\n",
     "    num_hits = num_hits.reshape(-1, agg_window).mean(axis=1)\n",
     "    max_num_hits = max(max_num_hits, num_hits.max())\n",
@@ -748,7 +1137,7 @@
     "\n",
     "* `0`: All six anode signals and the corresponding MCP signal were found.\n",
     "* `4`: One signal on layer `u` is missing, all other signals for this event were found.\n",
-    "* `18`: Only one anode signal on each layer was found and the MCP signal is missing. There is no way to check whether this combination of signals is actually valid.\n",
+    "* `18`: Only one anode signal on each layer was found and the MCP signal is missing. There is no way to check whether this combination of signals is actually valid based on the detector data alone.\n",
     "\n",
     "| Method | `u+v+w +mcp` |\n",
     "| - | - |\n",
@@ -785,13 +1174,17 @@
    },
    "outputs": [],
    "source": [
-    "for i, det_name in enumerate(remi['detector'].keys()):\n",
-    "    hits = hits_by_det[det_name]\n",
+    "for det_name in remi['detector'].keys():\n",
+    "    hits = det_data[det_name]['hits']\n",
     "    \n",
-    "    fig, ax = plt.subplots(num=60+i, figsize=(9.5, 5), ncols=1, clear=True,\n",
+    "    fig, ax = plt.subplots(num=f'reconstruction_methods_{det_name}', figsize=(9.5, 5), ncols=1, clear=True,\n",
     "                           gridspec_kw=dict(left=0.08, right=0.91, top=0.8))\n",
     "    \n",
     "    fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n",
+    "    \n",
+    "    if not (hits['m'] >= 0).any():\n",
+    "        warning(f'No hits found for {det_name}')\n",
+    "        continue\n",
     "\n",
     "    method_bins = np.bincount(hits['m'][hits['m'] >= 0], minlength=20)\n",
     "    ax.bar(np.arange(20), method_bins, width=0.5)\n",
@@ -860,36 +1253,53 @@
    },
    "outputs": [],
    "source": [
-    "for i, det_name in enumerate(remi['detector'].keys()):\n",
-    "    flat_hits = hits_by_det[det_name].reshape(-1)\n",
+    "for det_name in remi['detector'].keys():\n",
+    "    flat_hits = det_data[det_name]['hits'].reshape(-1)\n",
     "    flat_hits = flat_hits[np.isfinite(flat_hits[:]['x'])]\n",
-    "    flat_hits = flat_hits[flat_hits['m'] < 10]\n",
+    "    flat_hits = flat_hits[flat_hits['m'] <= 10]\n",
     "\n",
-    "    fig = plt.figure(num=70+i, figsize=(9, 13.5))\n",
+    "    fig = plt.figure(num=f'detector_results_{det_name}', figsize=(9, 10.5))\n",
     "    \n",
     "    fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n",
     "    fig.text(0.02, 0.02, det_name.upper(), rotation=90, ha='left', va='bottom', size='x-large')\n",
     "    \n",
-    "    imp = fig.add_axes([0.1 + 0.25/2, 0.56, 0.6, 0.4])\n",
-    "    txp = fig.add_axes([0.1, 0.28, 0.85, 0.22])\n",
-    "    typ = fig.add_axes([0.1, 0.04, 0.85, 0.22])\n",
+    "    imp = fig.add_axes([0.1 + 0.25/2, 0.56, 0.5, 0.45])\n",
+    "    txp = fig.add_axes([0.1, 0.27, 0.85, 0.23])\n",
+    "    typ = fig.add_axes([0.1, 0.02, 0.85, 0.23])\n",
     "    \n",
-    "    im_radius = remi['detector'][det_name]['mcp_radius']*1.1\n",
+    "    if flat_hits.size == 0:\n",
+    "        warning(f'No hits found for {det_name}')\n",
+    "        continue\n",
     "    \n",
-    "    min_tof = flat_hits['t'].min()\n",
-    "    max_tof = flat_hits['t'].max()\n",
+    "    mcp_radius = remi['detector'][det_name]['mcp_radius']\n",
+    "    im_radius = mcp_radius * 1.1\n",
     "    \n",
     "    imp.hist2d(flat_hits['x'], flat_hits['y'], bins=(256, 256),\n",
     "               range=[[-im_radius, im_radius], [-im_radius, im_radius]], norm=LogNorm())\n",
+    "    imp.add_patch(Circle(\n",
+    "        (0, 0), mcp_radius,\n",
+    "        linestyle='dashed', edgecolor='red', facecolor='none', linewidth=1))\n",
     "    imp.xaxis.set_label_position('top')\n",
     "    imp.set_xlabel('X / mm')\n",
     "    imp.set_ylabel('Y / mm')\n",
     "    imp.tick_params(right=True, labelright=True, top=True, labeltop=True)\n",
     "    imp.grid()\n",
+    "    \n",
+    "    text_pos = 1.05*mcp_radius*np.sin(np.pi/4)\n",
+    "    imp.text(text_pos, text_pos, 'MCP', c='red', ha='left', va='bottom')\n",
+    "    \n",
+    "    min_tof = flat_hits['t'].min()\n",
+    "    max_tof = flat_hits['t'].max()\n",
+    "    \n",
+    "    num_tof_bins = min(int((max_tof - min_tof) // 10), 500)\n",
+    "    \n",
+    "    if num_tof_bins == 0:\n",
+    "        warning(f'All TOFs limited to single bin for {det_name}')\n",
+    "        continue\n",
     "\n",
     "    for ax, dim_label in zip([txp, typ], ['x', 'y']):\n",
-    "        ax.hist2d(flat_hits['t'], flat_hits[dim_label], bins=(int((max_tof - min_tof) // 5), 256),\n",
-    "                   range=[[min_tof, max_tof], [-im_radius, im_radius]], norm=LogNorm())\n",
+    "        ax.hist2d(flat_hits['t'], flat_hits[dim_label], bins=(num_tof_bins, 256),\n",
+    "                  range=[[min_tof, max_tof], [-im_radius, im_radius]], norm=LogNorm())\n",
     "        ax.set_ylabel(f'{dim_label.upper()} / mm')\n",
     "        \n",
     "    typ.set_xlabel('Time-of-flight / ns')\n",
@@ -911,9 +1321,29 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "# Try to figure out proposal number from in_folder to work with older files.\n",
+    "m = re.match(r'p(\\d{6})', Path(in_folder).parts[-2])\n",
+    "if not proposal and m is not None:\n",
+    "    proposal = int(m[1])\n",
+    "\n",
     "seq_len = out_seq_len if out_seq_len > 0 else len(dc.files[0].train_ids)\n",
     "dataset_kwargs = {k[8:]: v for k, v in locals().items() if k.startswith('dataset_compression')}\n",
     "\n",
+    "control_sources = [det_device_id.format(karabo_id=karabo_id, det_name=det_name.upper())\n",
+    "                   for det_name in remi['detector']]\n",
+    "instrument_channels = [\n",
+    "    f'{device_id}:{det_output_key}/{channel}'\n",
+    "    for device_id in control_sources\n",
+    "    for channel in ['raw', 'rec']\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "Path(out_folder).mkdir(parents=True, exist_ok=True)\n",
     "print('Writing sequence files', flush=True, end='')\n",
     "\n",
@@ -924,10 +1354,16 @@
     "    seq_train_ids = dc.train_ids[train_mask]\n",
     "\n",
     "    with DataFile.from_details(out_folder, out_aggregator, run, seq_id) as outp:\n",
-    "        outp.create_index(seq_train_ids)\n",
+    "        outp.create_metadata(like=dc, proposal=proposal, run=run, sequence=seq_id,\n",
+    "                             control_sources=control_sources, instrument_channels=instrument_channels)\n",
+    "        outp.create_index(\n",
+    "            seq_train_ids, \n",
+    "            timestamps=dc.select_trains(by_id[seq_train_ids]).train_timestamps().astype(np.uint64)\n",
+    "        )\n",
     "        \n",
     "        for det_name in remi['detector']:\n",
     "            cur_device_id = det_device_id.format(karabo_id=karabo_id, det_name=det_name.upper())\n",
+    "            cur_max_hits = remi['detector'][det_name]['max_hits']\n",
     "                \n",
     "            cur_control_data = outp.create_control_source(cur_device_id)\n",
     "            # Manually manipulate the file here, still creates the index properly.\n",
@@ -936,25 +1372,37 @@
     "            \n",
     "            cur_fast_data = outp.create_instrument_source(f'{cur_device_id}:{det_output_key}')\n",
     "            \n",
-    "            if save_raw_triggers:\n",
-    "                cur_fast_data.create_key('raw.triggers', triggers[pulse_mask],\n",
-    "                                         chunks=tuple(chunks_triggers), **dataset_kwargs)\n",
+    "            cur_data = det_data[det_name]\n",
+    "            \n",
+    "            cur_fast_data.create_key('raw.triggers', triggers[pulse_mask],\n",
+    "                                     maxshape=(None,) + triggers.shape[1:],\n",
+    "                                     chunks=tuple(chunks_triggers), **dataset_kwargs)\n",
     "                \n",
-    "            if save_raw_edges:\n",
-    "                cur_fast_data.create_key('raw.edges', edges_by_det[det_name][pulse_mask],\n",
-    "                                         chunks=tuple(chunks_edges), **dataset_kwargs)\n",
+    "            cur_fast_data.create_key('raw.edges', cur_data['edges'][pulse_mask],\n",
+    "                                     maxshape=(None,) + cur_data['edges'].shape[1:],\n",
+    "                                     chunks=tuple(chunks_edges if chunks_edges[-1] <= cur_max_hits\n",
+    "                                                 else chunks_edges[:-1] + [cur_max_hits]),\n",
+    "                                     **dataset_kwargs)\n",
     "                \n",
-    "            if save_rec_signals:\n",
-    "                cur_fast_data.create_key('rec.signals', signals_by_det[det_name][pulse_mask],\n",
-    "                                         chunks=tuple(chunks_signals), **dataset_kwargs)\n",
+    "            cur_fast_data.create_key('raw.amplitudes', cur_data['amplitudes'][pulse_mask],\n",
+    "                                     maxshape=(None,) + cur_data['amplitudes'].shape[1:],\n",
+    "                                     chunks=tuple(chunks_amplitudes if chunks_amplitudes[-1] <= cur_max_hits\n",
+    "                                                 else chunks_amplitudes[:-1] + [cur_max_hits]),\n",
+    "                                     **dataset_kwargs)\n",
     "                \n",
-    "            if save_rec_hits:\n",
-    "                cur_fast_data.create_key('rec.hits', hits_by_det[det_name][pulse_mask],\n",
-    "                                         chunks=tuple(chunks_hits), **dataset_kwargs)\n",
+    "            cur_fast_data.create_key('rec.signals', cur_data['signals'][pulse_mask],\n",
+    "                                     maxshape=(None,) + cur_data['signals'].shape[1:],\n",
+    "                                     chunks=tuple(chunks_signals if chunks_signals[-1] <= cur_max_hits\n",
+    "                                                  else chunks_signals[:-1] + [cur_max_hits]),\n",
+    "                                     **dataset_kwargs)\n",
     "                \n",
-    "            cur_fast_data.create_index(raw=pulse_counts[train_mask], rec=pulse_counts[train_mask])\n",
+    "            cur_fast_data.create_key('rec.hits', cur_data['hits'][pulse_mask],\n",
+    "                                     maxshape=(None,) + hits.shape[1:],\n",
+    "                                     chunks=tuple(chunks_hits if chunks_hits[-1] <= cur_max_hits\n",
+    "                                                  else chunks_hits[:-1] + [cur_max_hits]),\n",
+    "                                     **dataset_kwargs)\n",
     "                \n",
-    "        outp.create_metadata(like=dc)\n",
+    "            cur_fast_data.create_index(raw=pulse_counts[train_mask], rec=pulse_counts[train_mask])\n",
     "        \n",
     "    print('.', flush=True, end='')\n",
     "    \n",
diff --git a/notebooks/Timepix/Compute_Timepix_Event_Centroids.ipynb b/notebooks/Timepix/Compute_Timepix_Event_Centroids.ipynb
new file mode 100755
index 0000000000000000000000000000000000000000..08c7fdb5d85d1f6c4c5099c8637e78adf426152a
--- /dev/null
+++ b/notebooks/Timepix/Compute_Timepix_Event_Centroids.ipynb
@@ -0,0 +1,453 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "0393ed18-b4e5-499b-bdd3-c9e5f24b9627",
+   "metadata": {},
+   "source": [
+    "# Timepix3\n",
+    "\n",
+    "Author: Björn Senfftleben / Philipp Schmidt, Version: 1.0\n",
+    "\n",
+    "The following notebook provides centroiding for data acquired with the Timepix3 camera detector (ASI TPX3CAM)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9484ee10",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Data selection parameters.\n",
+    "run = 420  # required\n",
+    "in_folder = '/gpfs/exfel/exp/SQS/202230/p900256/raw'  # required\n",
+    "out_folder = '/gpfs/exfel/exp/SQS/202230/p900256/scratch/cal_test'  # required\n",
+    "proposal = ''  # Proposal, leave empty for auto detection based on in_folder\n",
+    "\n",
+    "# These parameters are required by xfel-calibrate but ignored in this notebook.\n",
+    "cal_db_timeout = 0  # Calibration DB timeout, currently not used.\n",
+    "cal_db_interface = 'foo'  # Calibration DB interface, currently not used.\n",
+    "karabo_da = 'bar'  # Karabo data aggregator name, currently not used\n",
+    "\n",
+    "karabo_id = 'SQS_AQS_CAM'\n",
+    "in_fast_data = '{karabo_id}/CAM/TIMEPIX3:daqEventOutput'\n",
+    "out_device_id = '{karabo_id}/CAL/TIMEPIX3'\n",
+    "out_fast_data = '{karabo_id}/CAL/TIMEPIX3:output'\n",
+    "out_aggregator = 'TPX01'\n",
+    "out_seq_len = 2000\n",
+    "\n",
+    "max_num_centroids = 10000  # Maximum number of centroids per train\n",
+    "chunks_centroids = [1, 5000]  # Chunking of centroid data\n",
+    "dataset_compression = 'gzip'  # HDF compression method.\n",
+    "dataset_compression_opts = 3  # HDF GZIP compression level.\n",
+    "\n",
+    "clustering_epsilon = 2.0  # centroiding: The maximum distance between two samples for one to be considered as in the neighborhood of the other\n",
+    "clustering_tof_scale = 1e7  # centroiding: Scaling factor for the ToA axis so that the epsilon parameter in DB scan works in all 3 dimensions\n",
+    "clustering_min_samples = 2  # centroiding: minimum number of samples necessary for a cluster\n",
+    "clustering_n_jobs = 1  # centroiding: (DBSCAN) The number of parallel jobs to run.\n",
+    "threshold_tot = 0 # raw data: minimum ToT necessary for a pixel to contain valid data\n",
+    "\n",
+    "raw_timewalk_lut_filepath = ''  # fpath to look up table for timewalk correction relative to proposal path or empty string,\n",
+    "centroiding_timewalk_lut_filepath = ''  # fpath to look up table for timewalk correction relative to proposal path or empty string."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "524fe654-e112-4abe-813c-a0be9b3a3034",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from datetime import datetime\n",
+    "from pathlib import Path\n",
+    "from time import monotonic\n",
+    "from warnings import warn\n",
+    "\n",
+    "import numpy as np\n",
+    "import scipy.ndimage as nd\n",
+    "import h5py\n",
+    "\n",
+    "from sklearn.cluster import DBSCAN\n",
+    "from extra_data import RunDirectory\n",
+    "from extra_data.read_machinery import find_proposal\n",
+    "\n",
+    "from cal_tools.files import DataFile, sequence_pulses\n",
+    "\n",
+    "%matplotlib inline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e36f997c-4b66-4b11-99a8-5887e3572f56",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# centroiding\n",
+    "error_msgs = {\n",
+    "    -1: \"tpx_data has an invalid structure - ignore provided data\",\n",
+    "    -2: \"tpx_data arrays are of invalid lengths - ignore provided data\",\n",
+    "    -3: \"tpx_data arrays are empty\"\n",
+    "}\n",
+    "\n",
+    "\n",
+    "def check_data(tpx_data):\n",
+    "    required_keys = [\"x\", \"y\", \"toa\", \"tot\"]\n",
+    "    for key in required_keys:\n",
+    "        if key not in tpx_data.keys():\n",
+    "            warn(\"tpx data must contain the keys %s, but key %s not in tpx data keys (%s)\" % (required_keys, key, list(tpx_data.keys())),\n",
+    "                 category=UserWarning)\n",
+    "            return -1\n",
+    "\n",
+    "    reference_n_samples_key = \"x\"\n",
+    "    n_samples = len(tpx_data[reference_n_samples_key])\n",
+    "    for key in tpx_data.keys():\n",
+    "        if n_samples != len(tpx_data[key]):\n",
+    "            warn(\"arrays in tpx data must be of same length ( len(tpx_data[%s])=%i!=%i=(len(tpx_data[%s]) )\" % (reference_n_samples_key, n_samples, len(tpx_data[key]), key),\n",
+    "                 category=UserWarning)\n",
+    "            return -2\n",
+    "    if n_samples == 0:\n",
+    "        warn(\"no samples were provides with tpx data\", category=UserWarning)\n",
+    "        return -3\n",
+    "    return 0\n",
+    "\n",
+    "\n",
+    "def apply_single_filter(tpx_data, _filter):\n",
+    "    \"\"\"\n",
+    "    Simple function to apply a selecting or sorting filter to a dictionary of equally sized arrays\n",
+    "    Note: at no point a copy of the dictionary is made, as they are mutable, the input array is changed in memory!\n",
+    "\n",
+    "    Parameters\n",
+    "    ----------\n",
+    "    tpx_data: dictionary with timepix data, all arrays behind each key must be of same length\n",
+    "    _filter:  1d array or list of integers or booleans or np.s_ to select or sort data like a = a[_filter]\n",
+    "\n",
+    "    Returns\n",
+    "    -------\n",
+    "    tpx_data: like input tpx_data but with applied filter\n",
+    "\n",
+    "    \"\"\"\n",
+    "    try:\n",
+    "        for key in tpx_data.keys():\n",
+    "            tpx_data[key] = np.array(tpx_data[key])[_filter]\n",
+    "    except Exception as e:\n",
+    "        print(_filter)\n",
+    "        print(_filter.dtype)\n",
+    "        print(_filter.shape)\n",
+    "        print(tpx_data[key].shape)\n",
+    "        raise e\n",
+    "    return tpx_data\n",
+    "\n",
+    "\n",
+    "def pre_clustering_filter(tpx_data, tot_threshold=0):\n",
+    "    \"\"\"\n",
+    "    Collection of filters directly applied before clustering.\n",
+    "    Note: at no point a copy of the dictionary is made, as they are mutable, the input array is changed in memory!\n",
+    "\n",
+    "    Parameters\n",
+    "    ----------\n",
+    "    tpx_data:      Dictionary with timepix data, all arrays behind each key must be of same length\n",
+    "    tot_threshold: minimum ToT required for a pixel to contain valid data\n",
+    "\n",
+    "    Returns\n",
+    "    -------\n",
+    "    tpx_data: like input tpx_data but with applied filters\n",
+    "    \"\"\"\n",
+    "    if tot_threshold > 0:\n",
+    "        tpx_data = apply_single_filter(tpx_data, tpx_data[\"tot\"] >= tot_threshold)\n",
+    "\n",
+    "    return tpx_data\n",
+    "\n",
+    "\n",
+    "def post_clustering_filter(tpx_data):\n",
+    "    \"\"\"\n",
+    "    Collection of filters directly applied after clustering.\n",
+    "    Note: at no point a copy of the dictionary is made, as they are mutable, the input array is changed in memory!\n",
+    "\n",
+    "    Parameters\n",
+    "    ----------\n",
+    "    tpx_data:    Dictionary with timepix data, all arrays behind each key must be of same length, now with key labels\n",
+    "\n",
+    "    Returns\n",
+    "    -------\n",
+    "    tpx_data: like input tpx_data but with applied filters\n",
+    "    \"\"\"\n",
+    "    if tpx_data[\"labels\"] is not None:\n",
+    "        tpx_data = apply_single_filter(tpx_data, tpx_data[\"labels\"] != 0)\n",
+    "\n",
+    "    return tpx_data\n",
+    "\n",
+    "\n",
+    "def clustering(tpx_data, epsilon=2, tof_scale=1e7, min_samples=3, n_jobs=1):\n",
+    "    \"\"\"\n",
+    "\n",
+    "    Parameters\n",
+    "    ----------\n",
+    "    tpx_data       Dictionary with timepix data, all arrays behind each key must be of same length, now with key labels\n",
+    "    epsilon        The maximum distance between two samples for one to be considered as in the neighborhood of the other. \n",
+    "                   This is not a maximum bound on the distances of points within a cluster. This is the most important \n",
+    "                   DBSCAN parameter to choose appropriately for your data set and distance function.\n",
+    "    tof_scale      Scaling factor for the ToA data so that the epsilon parameter in DB scan works not only in the x/y \n",
+    "                   axes, but also in the ToA axis. So it converts ToA in s into \"ToA pixels\" -> e.g. tof_scale=1e7 means,\n",
+    "                   that 100 ns is considered comparable to 1 spatial pixel. \n",
+    "    min_samples    The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. \n",
+    "                   This includes the point itself.\n",
+    "    n_jobs         The number of parallel jobs to run. None means 1 unless in a joblib.parallel_backend context. \n",
+    "                   -1 means using all processors. See Glossary for more details.\n",
+    "\n",
+    "    Returns\n",
+    "    -------\n",
+    "\n",
+    "    \"\"\"\n",
+    "    coords = np.column_stack((tpx_data[\"x\"], tpx_data[\"y\"], tpx_data[\"toa\"]*tof_scale))\n",
+    "    dist = DBSCAN(eps=epsilon, min_samples=min_samples, metric=\"euclidean\", n_jobs=n_jobs).fit(coords)\n",
+    "    return dist.labels_ + 1\n",
+    "\n",
+    "def empty_centroid_data():\n",
+    "    return {\n",
+    "        \"x\": np.array([]),\n",
+    "        \"y\": np.array([]),\n",
+    "        \"toa\": np.array([]),\n",
+    "        \"tot\": np.array([]),\n",
+    "        \"tot_avg\": np.array([]),\n",
+    "        \"tot_max\": np.array([]),\n",
+    "        \"size\": np.array([]),\n",
+    "    }\n",
+    "\n",
+    "def get_centroids(tpx_data, timewalk_lut=None):\n",
+    "    centroid_data = empty_centroid_data()\n",
+    "    cluster_labels, cluster_size = np.unique(tpx_data[\"labels\"], return_counts=True)\n",
+    "\n",
+    "    cluster_tot_peaks = np.array(nd.maximum_position(tpx_data[\"tot\"], labels=tpx_data[\"labels\"], index=cluster_labels)).ravel()\n",
+    "    cluster_tot_integrals = nd.sum(tpx_data[\"tot\"], labels=tpx_data[\"labels\"], index=cluster_labels)\n",
+    "\n",
+    "    # compute centroid center through weighted average\n",
+    "    centroid_data[\"x\"] = np.array(nd.sum(tpx_data[\"x\"] * tpx_data[\"tot\"], labels=tpx_data[\"labels\"], index=cluster_labels) / cluster_tot_integrals).ravel()\n",
+    "    centroid_data[\"y\"] = np.array(nd.sum(tpx_data[\"y\"] * tpx_data[\"tot\"], labels=tpx_data[\"labels\"], index=cluster_labels) / cluster_tot_integrals).ravel()\n",
+    "    centroid_data[\"toa\"] = np.array(nd.sum(tpx_data[\"toa\"] * tpx_data[\"tot\"], labels=tpx_data[\"labels\"], index=cluster_labels) / cluster_tot_integrals).ravel()\n",
+    "\n",
+    "    # intensity & size information\n",
+    "    centroid_data[\"tot_avg\"] = np.array(nd.mean(tpx_data[\"tot\"], labels=tpx_data[\"labels\"], index=cluster_labels))\n",
+    "    centroid_data[\"tot_max\"] = tpx_data[\"tot\"][cluster_tot_peaks]\n",
+    "    centroid_data[\"tot\"] = np.array(cluster_tot_integrals)\n",
+    "    centroid_data[\"size\"] = cluster_size\n",
+    "\n",
+    "    # train ID information\n",
+    "    # ~ centroid_data[\"tid\"] = tpx_data[\"tid\"][cluster_tot_peaks]\n",
+    "\n",
+    "    # correct for timewalk if provided\n",
+    "    if timewalk_lut is not None:\n",
+    "        centroid_data[\"toa\"] -= timewalk_lut[np.int_(centroid_data[\"tot_max\"] // 25) - 1] * 1e3\n",
+    "    return centroid_data\n",
+    "\n",
+    "\n",
+    "def compute_centroids(x, y, tof, tot,\n",
+    "                      threshold_tot=0,\n",
+    "                      clustering_epsilon=2,\n",
+    "                      clustering_tof_scale=1e7,\n",
+    "                      clustering_min_samples=3,\n",
+    "                      clustering_n_jobs=1,\n",
+    "                      centroiding_timewalk_lut=None):\n",
+    "    # format input data\n",
+    "    _tpx_data = {\n",
+    "        \"x\": x,\n",
+    "        \"y\": y,\n",
+    "        \"toa\": tof,\n",
+    "        \"tot\": tot\n",
+    "    }\n",
+    "\n",
+    "    # ensure that valid data is available\n",
+    "    data_validation = check_data(_tpx_data)\n",
+    "    if data_validation < 0:\n",
+    "        if data_validation in error_msgs.keys():\n",
+    "            print(\"Data validation failed with message: %s\" % error_msgs[data_validation])\n",
+    "        else:\n",
+    "            print(\"Data validation failed: unknown reason\")\n",
+    "        return None\n",
+    "\n",
+    "    # clustering (identify clusters in 2d data (x,y,tof) that belong to a single hit,\n",
+    "    # each sample belonging to a cluster is labeled with an integer cluster id no)\n",
+    "    _tpx_data = pre_clustering_filter(_tpx_data, tot_threshold=threshold_tot)\n",
+    "    _tpx_data[\"labels\"] = clustering(_tpx_data, epsilon=clustering_epsilon, tof_scale=clustering_tof_scale, min_samples=clustering_min_samples, n_jobs=clustering_n_jobs)\n",
+    "    _tpx_data = post_clustering_filter(_tpx_data)\n",
+    "    # compute centroid data (reduce cluster of samples to a single point with properties)\n",
+    "    if _tpx_data[\"labels\"] is None or _tpx_data[\"labels\"].size == 0:\n",
+    "        # handle case of no identified clusters, return empty dictionary with expected keys\n",
+    "        return empty_centroid_data()\n",
+    "    _centroids = get_centroids(_tpx_data, timewalk_lut=centroiding_timewalk_lut)\n",
+    "    return _centroids"
+   ]
+  },
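+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal synthetic example of the centroiding defined above (illustrative only, not part of the processing below): two well-separated two-pixel clusters should be reduced to two centroids."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "_demo = compute_centroids(\n",
+    "    x=np.array([10, 11, 50, 50]),\n",
+    "    y=np.array([20, 20, 60, 61]),\n",
+    "    tof=np.array([0.0, 5e-8, 1e-6, 1.05e-6]),\n",
+    "    tot=np.array([30, 40, 25, 35]),\n",
+    "    clustering_min_samples=2)  # Two-pixel clusters require min_samples <= 2.\n",
+    "\n",
+    "# Expect two centroids of size 2 near (10.6, 20) and (50, 60.6).\n",
+    "print(_demo['x'], _demo['y'], _demo['size'])"
+   ]
+  },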
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "56306c15-513e-4e7f-9c47-c52ca61b27a8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dc = RunDirectory(Path(in_folder) / f'r{run:04d}', inc_suspect_trains=True)\n",
+    "\n",
+    "base_path=find_proposal(\"p%06i\" % int(dc.run_metadata()['proposalNumber']))\n",
+    "\n",
+    "if raw_timewalk_lut_filepath:\n",
+    "    raw_timewalk_lut_filepath_full = (Path(base_path) / Path(raw_timewalk_lut_filepath)).resolve()\n",
+    "    raw_timewalk_lut = np.load(raw_timewalk_lut_filepath_full)\n",
+    "else:\n",
+    "    raw_timewalk_lut = None\n",
+    "\n",
+    "if centroiding_timewalk_lut_filepath:\n",
+    "    centroiding_timewalk_lut_filepath_full = (Path(base_path) / Path(centroiding_timewalk_lut_filepath)).resolve()\n",
+    "    centroiding_timewalk_lut = np.load(centroiding_timewalk_lut_filepath_full)\n",
+    "else:\n",
+    "    centroiding_timewalk_lut = None"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8c28a4c8-961c-496b-80da-7fd867e5b0d3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "in_fast_data = in_fast_data.format(karabo_id=karabo_id)\n",
+    "out_device_id = out_device_id.format(karabo_id=karabo_id)\n",
+    "out_fast_data = out_fast_data.format(karabo_id=karabo_id)\n",
+    "\n",
+    "Path(out_folder).mkdir(exist_ok=True, parents=True)\n",
+    "\n",
+    "in_dc = dc.select(in_fast_data, require_all=True)\n",
+    "\n",
+    "dataset_kwargs = {k[8:]: v for k, v in locals().items() if k.startswith('dataset_compression')}\n",
+    "\n",
+    "centroid_dt = np.dtype([('x', np.float64),\n",
+    "                        ('y', np.float64),\n",
+    "                        ('toa', np.float64),\n",
+    "                        ('tot', np.float64),\n",
+    "                        ('tot_avg', np.float64),\n",
+    "                        ('tot_max', np.uint16),\n",
+    "                        ('size', np.int16)])\n",
+    "\n",
+    "\n",
+    "centroid_settings_template = {\n",
+    "    'timewalk_correction.raw_applied': (np.bool, bool(raw_timewalk_lut_filepath)),\n",
+    "    'timewalk_correction.raw_file': (\"S100\", str(raw_timewalk_lut_filepath)[-100:]),\n",
+    "    'timewalk_correction.centroiding_applied': (np.bool, bool(centroiding_timewalk_lut_filepath)),\n",
+    "    'timewalk_correction.centroiding_file': (\"S100\", str(centroiding_timewalk_lut_filepath)[-100:]),\n",
+    "    'clustering.epsilon': (np.float64, float(clustering_epsilon)),\n",
+    "    'clustering.tof_scale': (np.float64, float(clustering_tof_scale)),\n",
+    "    'clustering.min_samples': (np.int16, int(clustering_min_samples)),\n",
+    "    'threshold_tot': (np.int16, int(threshold_tot)),\n",
+    "}\n",
+    "\n",
+    "centroid_stats_template = {\n",
+    "    'N_centroids': (np.int, -1),\n",
+    "    'missing_centroids': (np.bool, False),\n",
+    "    'fraction_px_in_centroids': (np.float64, np.nan),\n",
+    "}\n",
+    "\n",
+    "centroid_settings_dt = np.dtype([(key, centroid_settings_template[key][0]) for key in centroid_settings_template])\n",
+    "centroid_stats_dt = np.dtype([(key, centroid_stats_template[key][0]) for key in centroid_stats_template])\n",
+    "\n",
+    "centroiding_kwargs = dict(\n",
+    "    threshold_tot=threshold_tot,\n",
+    "    clustering_epsilon=clustering_epsilon,\n",
+    "    clustering_tof_scale=clustering_tof_scale,\n",
+    "    clustering_min_samples=clustering_min_samples,\n",
+    "    clustering_n_jobs=clustering_n_jobs,\n",
+    "    centroiding_timewalk_lut=centroiding_timewalk_lut)\n",
+    "\n",
+    "print('Computing centroids and writing to file', flush=True, end='')\n",
+    "start = monotonic()\n",
+    "\n",
+    "for seq_id, seq_dc in enumerate(in_dc.split_trains(trains_per_part=out_seq_len)):\n",
+    "    train_ids = seq_dc.train_ids\n",
+    "    m_data_sources = []\n",
+    "    \n",
+    "    with DataFile.from_details(out_folder, out_aggregator, run, seq_id) as seq_file:                                                                                                    \n",
+    "        # No support needed for old EXDF files.\n",
+    "        seq_file.create_metadata(like=in_dc, sequence=seq_id,\n",
+    "                                 control_sources=[out_device_id],\n",
+    "                                 instrument_channels=[f'{out_fast_data}/data'])\n",
+    "        seq_file.create_index(train_ids)\n",
+    "            \n",
+    "        out_data = np.empty((len(train_ids), max_num_centroids), dtype=centroid_dt)\n",
+    "        out_data[:] = (np.nan, np.nan, np.nan, np.nan, np.nan, 0, -1)\n",
+    "        out_stats = np.empty((len(train_ids),), dtype=centroid_stats_dt)\n",
+    "        out_stats[:] = tuple([centroid_stats_template[key][1] for key in centroid_stats_template])\n",
+    "        \n",
+    "        for index, (train_id, data) in enumerate(seq_dc.trains()):\n",
+    "            events = data[in_fast_data]\n",
+    "\n",
+    "            sel = np.s_[:events['data.size']]\n",
+    "\n",
+    "            x = events['data.x'][sel]\n",
+    "            y = events['data.y'][sel]\n",
+    "            tot = events['data.tot'][sel]\n",
+    "            toa = events['data.toa'][sel]\n",
+    "\n",
+    "            if raw_timewalk_lut is not None:\n",
+    "                toa -= raw_timewalk_lut[np.int_(tot // 25) - 1] * 1e3\n",
+    "\n",
+    "            centroids = compute_centroids(x, y, toa, tot, **centroiding_kwargs)\n",
+    "\n",
+    "            num_centroids = len(centroids['x'])\n",
+    "            fraction_centroids = np.sum(centroids[\"size\"])/events['data.size'] if events['data.size']>0 else np.nan\n",
+    "            missing_centroids = num_centroids > max_num_centroids\n",
+    "    \n",
+    "            if num_centroids > max_num_centroids:\n",
+    "                warn('number of centroids larger than definde maximum, some data cannot be written to disk')\n",
+    "            \n",
+    "            for key in centroid_dt.names:\n",
+    "                out_data[key][index, :num_centroids] = centroids[key]\n",
+    "            out_stats[\"fraction_px_in_centroids\"][index] = fraction_centroids\n",
+    "            out_stats[\"N_centroids\"][index] = num_centroids\n",
+    "            out_stats[\"missing_centroids\"][index] = missing_centroids\n",
+    "        \n",
+    "        # Create sources.\n",
+    "        cur_slow_data = seq_file.create_control_source(out_device_id)\n",
+    "        cur_fast_data = seq_file.create_instrument_source(out_fast_data)\n",
+    "\n",
+    "        # Add source indices.\n",
+    "        cur_slow_data.create_index(len(train_ids))\n",
+    "        cur_fast_data.create_index(data=np.ones_like(train_ids))\n",
+    "        \n",
+    "        for key, (type_, data) in centroid_settings_template.items():\n",
+    "            cur_slow_data.create_run_key(f'settings.{key}', data)\n",
+    "        \n",
+    "        cur_fast_data.create_key('data.centroids', out_data,\n",
+    "                                 chunks=tuple(chunks_centroids),\n",
+    "                                 **dataset_kwargs)\n",
+    "        cur_fast_data.create_key('data.stats', out_stats)\n",
+    "        \n",
+    "    print('.', flush=True, end='')\n",
+    "    \n",
+    "end = monotonic()\n",
+    "print('')\n",
+    "\n",
+    "print(f'{end-start:.01f}s')"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "pycal",
+   "language": "python",
+   "name": "pycal"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/ePix100/Characterize_Darks_ePix100_NBC.ipynb b/notebooks/ePix100/Characterize_Darks_ePix100_NBC.ipynb
index 7a174bdd77310197f6da7ba559a3eb700af70136..cfaf022060c1028f47fccfaa3c13931751f6aa77 100644
--- a/notebooks/ePix100/Characterize_Darks_ePix100_NBC.ipynb
+++ b/notebooks/ePix100/Characterize_Darks_ePix100_NBC.ipynb
@@ -27,14 +27,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = '/gpfs/exfel/exp/HED/202201/p002804/raw' # input folder, required\n",
+    "in_folder = '/gpfs/exfel/exp/MID/202330/p900329/raw' # input folder, required\n",
     "out_folder = '' # output folder, required\n",
     "metadata_folder = ''  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
     "sequence = 0 # sequence file to use\n",
-    "run = 281 # which run to read data from, required\n",
+    "run = 106 # which run to read data from, required\n",
     "\n",
     "# Parameters for accessing the raw data.\n",
-    "karabo_id = \"HED_IA1_EPX100-1\" # karabo karabo_id\n",
+    "karabo_id = \"MID_EXP_EPIX-1\" # karabo karabo_id\n",
     "karabo_da = [\"EPIX01\"]  # data aggregators\n",
     "receiver_template = \"RECEIVER\" # detector receiver template for accessing raw data files\n",
     "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data\n",
@@ -42,7 +42,7 @@
     "\n",
     "# Parameters for the calibration database.\n",
     "use_dir_creation_date = True\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "db_output = False # Output constants to the calibration database\n",
     "local_output = True # Output constants locally\n",
@@ -53,7 +53,8 @@
     "fix_integration_time = -1 # Integration time. Set to -1 to read from .h5 file\n",
     "fix_temperature = -1 # Fixed temperature in Kelvin. Set to -1 to read from .h5 file\n",
     "temp_limits = 5 # Limit for parameter Operational temperature\n",
-    "badpixel_threshold_sigma = 5.  # Bad pixels defined by values outside n times this std from median. Default: 5\n",
+    "badpixel_noise_sigma = 5  # Bad pixels defined by noise value outside n * std from median. Default: 5\n",
+    "badpixel_offset_sigma = 2  # Bad pixels defined by offset value outside n * std from median. Default: 2\n",
     "CM_N_iterations = 2  # Number of iterations for common mode correction. Set to 0 to skip it\n",
     "\n",
     "# Parameters used during selecting raw data trains.\n",
@@ -359,15 +360,15 @@
     "    lut_label='[ADU]',\n",
     "    x_label='Column', \n",
     "    y_label='Row',\n",
-    "    vmin=max(0, np.round((stats['median'] - badpixel_threshold_sigma*stats['std']))), \n",
-    "    vmax=np.round(stats['median'] + badpixel_threshold_sigma*stats['std'])\n",
+    "    vmin=max(0, np.round((stats['median'] - badpixel_noise_sigma*stats['std']))), \n",
+    "    vmax=np.round(stats['median'] + badpixel_noise_sigma*stats['std'])\n",
     ")\n",
     "fig.suptitle('Noise Map', x=.5, y=.9, fontsize=16)\n",
     "fig.set_size_inches(h=15, w=15)\n",
     "\n",
     "# Calculate overall noise histogram\n",
-    "bins = np.arange(max(0, stats['mean'] - badpixel_threshold_sigma*stats['std']),\n",
-    "                 stats['mean'] + badpixel_threshold_sigma*stats['std'], \n",
+    "bins = np.arange(max(0, stats['mean'] - badpixel_noise_sigma*stats['std']),\n",
+    "                 stats['mean'] + badpixel_noise_sigma*stats['std'], \n",
     "                 stats['std']/100)\n",
     "\n",
     "h, c = np.histogram(\n",
@@ -415,8 +416,8 @@
     "    aspect=1.5,\n",
     "    x_label='Noise [ADU]',\n",
     "    y_label='Counts',\n",
-    "    x_range=(max(0, stats['median'] - badpixel_threshold_sigma*stats['std']),\n",
-    "             stats['median'] + badpixel_threshold_sigma*stats['std']),\n",
+    "    x_range=(max(0, stats['median'] - badpixel_noise_sigma*stats['std']),\n",
+    "             stats['median'] + badpixel_noise_sigma*stats['std']),\n",
     "    y_range=(0, max(d[0]['y'])*1.1),\n",
     ")\n",
     "plt.grid(linestyle = ':')\n",
@@ -571,11 +572,11 @@
     "constant_maps['BadPixelsDark'] = np.zeros(constant_maps['Offset'].shape, np.uint32)\n",
     "\n",
     "# Find noise related bad pixels\n",
-    "constant_maps['BadPixelsDark'][eval_bpidx(constant_maps['Noise'], badpixel_threshold_sigma, sensor_size//2)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
+    "constant_maps['BadPixelsDark'][eval_bpidx(constant_maps['Noise'], badpixel_noise_sigma, sensor_size//2)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
     "constant_maps['BadPixelsDark'][~np.isfinite(constant_maps['Noise'])] = BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
     "\n",
     "# Find offset related bad pixels\n",
-    "constant_maps['BadPixelsDark'][eval_bpidx(constant_maps['Offset'], badpixel_threshold_sigma, sensor_size//2)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
+    "constant_maps['BadPixelsDark'][eval_bpidx(constant_maps['Offset'], badpixel_offset_sigma, sensor_size//2)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
     "constant_maps['BadPixelsDark'][~np.isfinite(constant_maps['Offset'])] = BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
     "\n",
     "# Plot Bad Pixels Map\n",
@@ -620,7 +621,7 @@
     "    print('Common mode correction not applied.')\n",
     "else:\n",
     "    \n",
-    "    commonModeBlockSize = sensor_size//2\n",
+    "    commonModeBlockSize = (sensor_size//[8,2]).astype(int) # bank size (x=96,y=354) pixels\n",
     "\n",
     "    # Instantiate common mode calculators for column and row CM correction\n",
     "    cmCorrection_col = xcal.CommonModeCorrection(\n",
@@ -672,7 +673,7 @@
     "        noise_map_corrected = np.nanstd(data, axis=0)[..., np.newaxis]\n",
     "\n",
     "        # Update bad pixels map \n",
-    "        constant_maps['BadPixelsDark'][eval_bpidx(noise_map_corrected, badpixel_threshold_sigma, sensor_size//2)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
+    "        constant_maps['BadPixelsDark'][eval_bpidx(noise_map_corrected, badpixel_noise_sigma, sensor_size//2)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
     "        bp_offset.append(np.sum(constant_maps['BadPixelsDark']==1))\n",
     "        bp_noise.append(np.sum(constant_maps['BadPixelsDark']==2))\n",
     "\n",
diff --git a/notebooks/ePix100/Characterize_FlatFields_ePix100_NBC.ipynb b/notebooks/ePix100/Characterize_FlatFields_ePix100_NBC.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..948ec45f46349254e5cbe7084843dcd62dfc1935
--- /dev/null
+++ b/notebooks/ePix100/Characterize_FlatFields_ePix100_NBC.ipynb
@@ -0,0 +1,1411 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "826b869a",
+   "metadata": {},
+   "source": [
+    "#  ePix100 Flat Field Characterization\n",
+    "\n",
+    "Author: European XFEL Detector Group, Version 1.0\n",
+    "\n",
+    "Generate gain maps from flat-field runs."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7439b810",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "in_folder = '/gpfs/exfel/exp/MID/202231/p900310/raw' # input folder, required\n",
+    "out_folder = '' # output folder, required\n",
+    "metadata_folder = ''  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
+    "run = 29 # which run to read data from, required\n",
+    "\n",
+    "# Parameters for accessing the raw data.\n",
+    "karabo_id = \"MID_EXP_EPIX-2\"  # karabo ID\n",
+    "karabo_da = \"EPIX02\"  # data aggregator\n",
+    "receiver_template = \"RECEIVER\" # detector receiver template for accessing raw data files\n",
+    "instrument_source_template = '{}/DET/{}:daqOutput' # instrument detector data source in h5files\n",
+    "\n",
+    "# Fit parameters\n",
+    "peak_fitting = 'gauss' # method to find the peak position per pixel: 'median' or 'gauss'\n",
+    "N_sigma_interval = 5   # sigma interval to find singles peak in each per pixel \n",
+    "peak_energy = 8.048    # [keV] Cu K$\\alpha$1\n",
+    "\n",
+    "# ADU range\n",
+    "ADU_range = [-50,500] # expected range that encloses the raw signal from the FF run \n",
+    "\n",
+    "# Cluster calculators (given in N times sigma noise)\n",
+    "split_evt_primary_threshold = 7   # Split event primary threshold \n",
+    "split_evt_secondary_threshold = 3  # Split event secondary threshold\n",
+    "split_evt_mip_threshold = 1000     # Threshold for rejection of MIP events (e.g, cosmic-rays)\n",
+    "\n",
+    "# Parameters for the calibration database.\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # calibration DB interface to use\n",
+    "cal_db_timeout = 300000 # timeout on caldb requests\n",
+    "creation_time = \"\"  # The timestamp to use with Calibration DB. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
+    "db_output = False # Output constants to the calibration database\n",
+    "local_output = True # Output constants locally\n",
+    "\n",
+    "# Conditions used for injected calibration constants.\n",
+    "bias_voltage = 200 # Bias voltage\n",
+    "in_vacuum = False # Detector operated in vacuum\n",
+    "fix_integration_time = -1 # Integration time. Set to -1 to read from .h5 file\n",
+    "fix_temperature = -1 # Fixed temperature in Kelvin. Set to -1 to read from .h5 file\n",
+    "temp_limits = 5 # Limit for parameter Operational temperature\n",
+    "\n",
+    "# Parameters used during selecting raw data trains.\n",
+    "min_trains = 1 # Minimum number of trains that should be available. Default 1.\n",
+    "max_trains = 0 # Maximum number of trains to use for processing. Set to 0 to use all available trains."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "43791d97",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import warnings\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "from matplotlib.colors import LogNorm\n",
+    "import numpy as np\n",
+    "import pasha as psh\n",
+    "from extra_data import RunDirectory\n",
+    "from pathlib import Path\n",
+    "from prettytable import PrettyTable\n",
+    "from scipy.optimize import curve_fit\n",
+    "\n",
+    "import XFELDetAna.xfelprofiler as xprof\n",
+    "from XFELDetAna import xfelpyanatools as xana\n",
+    "from XFELDetAna import xfelpycaltools as xcal\n",
+    "from XFELDetAna.plotting.util import prettyPlotting\n",
+    "\n",
+    "from cal_tools.enums import BadPixels\n",
+    "from cal_tools.step_timing import StepTimer\n",
+    "from cal_tools.epix100 import epix100lib\n",
+    "from cal_tools.tools import (\n",
+    "    calcat_creation_time,\n",
+    "    get_pdu_from_db,\n",
+    "    get_constant_from_db,\n",
+    "    get_report,\n",
+    "    save_const_to_h5,\n",
+    "    send_to_db,\n",
+    ")\n",
+    "from iCalibrationDB import Conditions, Constants"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4f4d9f62",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%matplotlib inline\n",
+    "\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "prettyPlotting = True\n",
+    "\n",
+    "profiler = xprof.Profiler()\n",
+    "profiler.disable()\n",
+    "\n",
+    "step_timer = StepTimer()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6571ae1c",
+   "metadata": {},
+   "source": [
+    "## Load Data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9c93190b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "instrument_src = instrument_source_template.format(karabo_id, receiver_template)\n",
+    "\n",
+    "# Run directory\n",
+    "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
+    "file_loc = f'proposal:{proposal} runs:{run}'\n",
+    "report = get_report(metadata_folder)\n",
+    "\n",
+    "ped_dir = Path(in_folder) / f'r{run:04d}'\n",
+    "run_dc = RunDirectory(ped_dir)\n",
+    "\n",
+    "print(f\"Run is: {run}\")\n",
+    "print(f\"Instrument H5File source: {instrument_src}\")\n",
+    "\n",
+    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
+    "print(f\"Using {creation_time.isoformat()} as creation time\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "08d6fef2",
+   "metadata": {
+    "slideshow": {
+     "slide_type": "-"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# Path to pixels ADC values\n",
+    "pixels_src = (instrument_src, \"data.image.pixels\")\n",
+    "\n",
+    "# Specify the total number of images to process\n",
+    "n_trains = run_dc.get_data_counts(*pixels_src).shape[0]\n",
+    "\n",
+    "# Modify n_trains to process based on the given maximum and minimum number of trains.\n",
+    "if max_trains:\n",
+    "    n_trains = min(max_trains, n_trains)\n",
+    "    \n",
+    "if n_trains < min_trains:\n",
+    "    raise ValueError(\n",
+    "        f\"Less than {min_trains} trains are available in RAW data.\"\n",
+    "         \" Not enough data to process flat fields.\")\n",
+    "\n",
+    "all_trains = len(run_dc.select(instrument_src).train_ids)\n",
+    "if n_trains != all_trains:\n",
+    "    print(f\"Warning: {all_trains - n_trains} trains with empty data.\")\n",
+    "\n",
+    "print(f'Images to analyze: {n_trains}')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9fdf1715",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Read sensor size\n",
+    "sensor_size = run_dc[instrument_src, 'data.image.dims'].as_single_value(reduce_by='first') # (x=768, y=708) expected\n",
+    "sensor_size = sensor_size[sensor_size != 1].tolist()  # data.image.dims for old data is [768, 708, 1]\n",
+    "assert sensor_size == [768,708], 'Unexpected sensor dimensions.' \n",
+    "\n",
+    "ctrl_data = epix100lib.epix100Ctrl(\n",
+    "    run_dc=run_dc,\n",
+    "    instrument_src=instrument_src,\n",
+    "    ctrl_src=f\"{karabo_id}/DET/CONTROL\",\n",
+    "    )\n",
+    "# Read integration time\n",
+    "if fix_integration_time == -1:\n",
+    "    integration_time = ctrl_data.get_integration_time()\n",
+    "    integration_time_str_add = ''\n",
+    "else:\n",
+    "    integration_time = fix_integration_time\n",
+    "    integration_time_str_add = '(manual input)'\n",
+    "    \n",
+    "# Read temperature    \n",
+    "if fix_temperature == -1:\n",
+    "    temperature = ctrl_data.get_temprature()\n",
+    "    temperature_k = temperature + 273.15\n",
+    "    temp_str_add = ''\n",
+    "else:\n",
+    "    temperature_k = fix_temperature\n",
+    "    temperature = fix_temperature - 273.15\n",
+    "    temp_str_add = '(manual input)'\n",
+    "    \n",
+    "# Print operating conditions\n",
+    "print(f\"Bias voltage: {bias_voltage} V\")\n",
+    "print(f\"Detector integration time: {integration_time} \\u03BCs {integration_time_str_add}\")\n",
+    "print(f\"Mean temperature: {temperature:0.2f}\\u00B0C / {temperature_k:0.2f} K {temp_str_add}\")\n",
+    "print(f\"Operated in vacuum: {in_vacuum}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a4dd3d8d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "step_timer = StepTimer()\n",
+    "step_timer.start()\n",
+    "\n",
+    "# Read data\n",
+    "data_dc = run_dc.select(*pixels_src, require_all=True).select_trains(np.s_[:n_trains])\n",
+    "dshape = data_dc[pixels_src].shape\n",
+    "\n",
+    "step_timer.done_step('Flat-fields loaded. Elapsed Time')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7920cb0b",
+   "metadata": {
+    "tags": []
+   },
+   "source": [
+    "## Retrieve Necessary Calibration Constants"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "593964be",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "const_data = dict()\n",
+    "constants = ['Offset', 'Noise', 'BadPixelsDark']\n",
+    "\n",
+    "condition =  Conditions.Dark.ePix100(bias_voltage=bias_voltage,\n",
+    "                                     integration_time=integration_time,\n",
+    "                                     temperature=temperature_k,\n",
+    "                                     in_vacuum=in_vacuum)\n",
+    "\n",
+    "for cname in constants:        \n",
+    "    const_data[cname] = get_constant_from_db(\n",
+    "        karabo_id=karabo_id,\n",
+    "        karabo_da=karabo_da,\n",
+    "        constant=getattr(Constants.ePix100, cname)(),\n",
+    "        condition=condition,\n",
+    "        empty_constant=None,\n",
+    "        cal_db_interface=cal_db_interface,\n",
+    "        creation_time=creation_time,\n",
+    "        timeout=cal_db_timeout\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ea05e961",
+   "metadata": {},
+   "source": [
+    "## Instantiate calculators"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f05e8297",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "block_size = [sensor_size[0]//2, sensor_size[1]//2]\n",
+    "noiseSigma = 5\n",
+    "\n",
+    "cmCorrection_block = xcal.CommonModeCorrection(\n",
+    "    sensor_size,\n",
+    "    block_size,\n",
+    "    'block',\n",
+    "    noiseMap=const_data['Noise'].swapaxes(0,1),\n",
+    "    noiseSigma=noiseSigma,\n",
+    "    parallel=False)\n",
+    "cmCorrection_col = xcal.CommonModeCorrection(\n",
+    "    sensor_size,\n",
+    "    block_size,\n",
+    "    'col',\n",
+    "    noiseMap=const_data['Noise'].swapaxes(0,1),\n",
+    "    noiseSigma=noiseSigma,\n",
+    "    parallel=False)\n",
+    "cmCorrection_row = xcal.CommonModeCorrection(\n",
+    "    sensor_size,\n",
+    "    block_size,\n",
+    "    'row',\n",
+    "    noiseMap=const_data['Noise'].swapaxes(0,1),\n",
+    "    noiseSigma=noiseSigma,\n",
+    "    parallel=False)\n",
+    "  \n",
+    "patternClassifier = xcal.PatternClassifier(\n",
+    "    shape=sensor_size,\n",
+    "    noisemap=const_data['Noise'].swapaxes(0,1),\n",
+    "    primaryThreshold=split_evt_primary_threshold,\n",
+    "    secondaryThreshold=split_evt_secondary_threshold,\n",
+    "    upperThreshold=split_evt_mip_threshold,\n",
+    "    blockSize=block_size,\n",
+    "    setPixelMask = const_data['BadPixelsDark'].flatten(),\n",
+    "    parallel=False\n",
+    ")\n",
+    "\n",
+    "patternSelector = xcal.PatternSelector(\n",
+    "    sensor_size, \n",
+    "    selectionList = [100, 101], # singles patterns\n",
+    "    blockSize=block_size, \n",
+    "    parallel=False)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8977c9ff",
+   "metadata": {},
+   "source": [
+    "## Correct data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "de145b05",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "bin_min = ADU_range[0]\n",
+    "bin_max = ADU_range[1]\n",
+    "bin_width = 1\n",
+    "\n",
+    "bins = np.arange(bin_min,bin_max,bin_width)\n",
+    "hist = {'O': 0,'CM': 0,'CS': 0, 'S': 0}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1765d3f8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def correct_train(worker_id, index, train_id, dc):\n",
+    "\n",
+    "    d = dc[pixels_src[0]][pixels_src[1]].astype(np.float32)\n",
+    "\n",
+    "    # Offset correction\n",
+    "    d -= const_data['Offset'].squeeze()\n",
+    "    hist['O'] += np.histogram(d.flatten(),bins=bins)[0]\n",
+    "     \n",
+    "    # Common Mode correction\n",
+    "    d = d.swapaxes(0,-1)\n",
+    "    d = cmCorrection_block.correct(d)\n",
+    "    d = cmCorrection_col.correct(d)\n",
+    "    d = cmCorrection_row.correct(d)\n",
+    "    d = d.swapaxes(0,-1)\n",
+    "    hist['CM'] += np.histogram(d.flatten(),bins=bins)[0]\n",
+    "    \n",
+    "    # Charge Sharing correction\n",
+    "    d = d.swapaxes(0,-1)\n",
+    "    d, patterns = patternClassifier.classify(d)\n",
+    "    sing,fs = patternSelector.select(d,patterns)\n",
+    "    d = d.swapaxes(0,-1)\n",
+    "    hist['CS'] += np.histogram(d[d>0].flatten(),bins=bins)[0]\n",
+    "    hist['S'] += np.histogram(sing[sing>0].flatten(),bins=bins)[0]\n",
+    "    \n",
+    "    data_corr[index+prev_chunk] = d\n",
+    "    data_singles[index+prev_chunk] = sing.swapaxes(0,-1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7c4dcd5b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "step_timer.start()\n",
+    "\n",
+    "chunk_size = 1000\n",
+    "\n",
+    "psh.set_default_context('threads', num_workers=35) # num_workers=35 was found to be optimal\n",
+    "data_corr = psh.alloc(shape=dshape, dtype=np.float32)\n",
+    "data_singles = psh.alloc(shape=dshape, dtype=int)\n",
+    "\n",
+    "chunk = 0\n",
+    "while chunk < dshape[0]-1:\n",
+    "    \n",
+    "    prev_chunk = chunk\n",
+    "    chunk+=chunk_size\n",
+    "    if chunk > dshape[0]: # last chunk may have different size\n",
+    "        chunk = dshape[0]-1\n",
+    "        \n",
+    "    psh.map(correct_train, data_dc.select_trains(np.arange(prev_chunk,chunk)))\n",
+    "        \n",
+    "    print(f'Corrected trains: {chunk} ({round(chunk/dshape[0]*100)}%)',end='\\r')\n",
+    "\n",
+    "step_timer.done_step('Corrected data. Elapsed Time')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "923a5ac4",
+   "metadata": {},
+   "source": [
+    "## Plot histograms"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c43ae1dd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "bins_c = bins[:-1]+np.diff(bins)[0]/2 # center of bins\n",
+    "\n",
+    "plt.figure(figsize=(12,8))\n",
+    "plt.plot(bins_c,hist['O'], label='Offset corrected')\n",
+    "plt.plot(bins_c,hist['CM'], label='Common Mode corrected')\n",
+    "plt.plot(bins_c,hist['CS'], label='Charge Sharing corrected')\n",
+    "plt.plot(bins_c,hist['S'], label='Singles')\n",
+    "plt.xlim(ADU_range)\n",
+    "plt.yscale('log')\n",
+    "plt.xlabel('ADU',fontsize=12)\n",
+    "plt.title(f'{karabo_id} | {proposal} - r{run}', fontsize=14)\n",
+    "plt.legend(fontsize=12);\n",
+    "plt.grid(ls=':')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "46d9fe80",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(f'Primary threshold: {split_evt_primary_threshold}')\n",
+    "print(f'Secondary threshold: {split_evt_secondary_threshold}')\n",
+    "\n",
+    "patternStats = patternClassifier.getPatternStats()\n",
+    "\n",
+    "n_singles = np.sum(patternStats['singles'])\n",
+    "n_doubles = np.sum(patternStats['doubles'])\n",
+    "n_triples = np.sum(patternStats['triples'])\n",
+    "n_quads = np.sum(patternStats['quads'])\n",
+    "n_clusters = np.sum(patternStats['clusters'])\n",
+    "known_patterns = np.sum((n_singles, n_doubles, n_triples, n_quads))\n",
+    "\n",
+    "t1,t2 = PrettyTable(),PrettyTable()\n",
+    "t1.field_names = ['Photon Hits', 'Frequency']\n",
+    "t1.add_row(['Big Clusters', f'{n_clusters/(known_patterns+n_clusters)*100: .2f} %'])\n",
+    "t1.add_row(['Listed Patterns', f'{known_patterns/(known_patterns+n_clusters)*100: .2f} %'])\n",
+    "\n",
+    "print(t1)\n",
+    "\n",
+    "t2.field_names = ['Listed Patterns', 'Frequency']\n",
+    "t2.add_row(['Singles', f'{n_singles/known_patterns*100: .2f} %'])\n",
+    "t2.add_row(['Doubles', f'{n_doubles/known_patterns*100: .2f} %'])\n",
+    "t2.add_row(['Triples', f'{n_triples/known_patterns*100: .2f} %'])\n",
+    "t2.add_row(['Quadruplets', f'{n_quads/known_patterns*100: .2f} %'])\n",
+    "\n",
+    "print(t2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7739d666",
+   "metadata": {},
+   "source": [
+    "## Flat-Field Statistics"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ed100e6a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Definition of gaussian function for fitting\n",
+    "def gauss(x, *p):\n",
+    "    A, mu, sigma = p\n",
+    "    return A*np.exp(-(x-mu)**2/(2.*sigma**2))\n",
+    "\n",
+    "# rough initial estimate of fit parameters\n",
+    "fit_estimates = [np.max(hist['S']),           # amplitude\n",
+    "                 bins[np.argmax(hist['S'])],  # centroid\n",
+    "                 10]                          # sigma"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a649666b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "coeff, _ = curve_fit(gauss, bins_c, hist['S'], p0=fit_estimates)\n",
+    "singles_mu = coeff[1]\n",
+    "singles_sig = abs(coeff[2])\n",
+    "ROI = np.round([singles_mu-N_sigma_interval*singles_sig, # region of interest to find first photopeak per pixel\n",
+    "                singles_mu+N_sigma_interval*singles_sig]).astype(int)\n",
+    "y_fit = gauss(bins_c, *coeff)\n",
+    "\n",
+    "plt.figure(figsize=(9,6))\n",
+    "plt.plot(bins_c,hist['S'],'k',label = 'singles')\n",
+    "plt.plot(bins_c,y_fit,'g--',label = 'gauss fit') \n",
+    "plt.ylim(1,max(hist['S'])*1.5);\n",
+    "plt.xlim(ADU_range)\n",
+    "plt.vlines(coeff[1],0,plt.gca().get_ylim()[1],color='g',ls=':')\n",
+    "\n",
+    "plt.axvspan(ROI[0],\n",
+    "            ROI[1],\n",
+    "            alpha = .2,\n",
+    "            color = 'green',\n",
+    "            label = f'\\u03BC ± {N_sigma_interval}\\u03c3')\n",
+    "\n",
+    "plt.legend(fontsize=12);\n",
+    "plt.xlabel('ADU',fontsize=12)\n",
+    "plt.yscale('log')\n",
+    "plt.grid(ls=':')\n",
+    "plt.show()\n",
+    "\n",
+    "print('--------------------')\n",
+    "print('Fit parameters:')\n",
+    "print(f'  centroid = {np.round(singles_mu,3)}')\n",
+    "print(f'     sigma = {np.round(singles_sig,3)}')\n",
+    "print('---------------------')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d4bba07d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Calculate singles per pixel\n",
+    "step_timer.start()\n",
+    "\n",
+    "singles_per_pixel = np.empty(np.flip(sensor_size))\n",
+    "\n",
+    "for py in range(0,int(sensor_size[1])):\n",
+    "    for px in range(0,int(sensor_size[0])):\n",
+    "        singles_per_pixel[py,px] = np.sum((data_singles[:,py,px]>=ROI[0]) & (data_singles[:,py,px]<ROI[1]))\n",
+    "\n",
+    "step_timer.done_step('Calculated singles per pixel. Elapsed Time')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c5986694",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "mask_bins = np.unique(singles_per_pixel,return_counts=True)[1] > np.max(np.unique(singles_per_pixel,return_counts=True)[1])*.01\n",
+    "last_bin = np.max(np.unique(singles_per_pixel)[mask_bins]) # xlim on bin that has less than 1% of max counts\n",
+    "\n",
+    "# Plot singles distribution\n",
+    "fig = xana.heatmapPlot(\n",
+    "    singles_per_pixel,\n",
+    "    lut_label='# singles',\n",
+    "    x_label='Column',\n",
+    "    y_label='Row',\n",
+    "    vmax = last_bin\n",
+    ")\n",
+    "fig.suptitle(f'Singles Distribution', x=.48, y=.9, fontsize=14)\n",
+    "fig.set_size_inches(h=10, w=10);\n",
+    "\n",
+    "plt.figure(figsize=(7,5))\n",
+    "plt.hist(singles_per_pixel.flatten(),bins=np.arange(0,last_bin,1),\n",
+    "         align = 'left',\n",
+    "         histtype = 'bar',\n",
+    "         edgecolor='black', \n",
+    "         linewidth=1.2)\n",
+    "plt.xlabel('Singles per pixel',fontsize=12)\n",
+    "plt.grid(ls='--',axis='y',color='b',alpha=.5)\n",
+    "plt.show()\n",
+    "\n",
+    "print(f'Average number of singles per pixel: {np.round(np.sum(data_singles>0)/np.prod(sensor_size),2)}')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3d58fc82",
+   "metadata": {},
+   "source": [
+    "## Plot random sample pixels "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "62b2650e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "N_sample_pixels = 16\n",
+    "\n",
+    "# Plot some random pixels, avoiding bad ones\n",
+    "np.random.seed(0)\n",
+    "sample_pixels = np.transpose([np.random.randint(0, sensor_size[0], N_sample_pixels),\n",
+    "                              np.random.randint(0, sensor_size[1], N_sample_pixels)])\n",
+    "while np.sum(const_data['BadPixelsDark'][sample_pixels[:,1],sample_pixels[:,0]]):\n",
+    "    sample_pixels = np.transpose([np.random.randint(0, sensor_size[0], N_sample_pixels),\n",
+    "                                  np.random.randint(0, sensor_size[1], N_sample_pixels)])\n",
+    "\n",
+    "fig = plt.figure(figsize=(20,20))\n",
+    "roi_bins = np.arange(ROI[0], ROI[1])\n",
+    "it_counter = 0\n",
+    "for px,py in sample_pixels:\n",
+    "    it_counter+=1    \n",
+    "    \n",
+    "    plt.subplot(int(np.sqrt(N_sample_pixels)),int(np.sqrt(N_sample_pixels)),it_counter)\n",
+    "    \n",
+    "    h,ADU = np.histogram(data_singles[:,py,px],bins=roi_bins)\n",
+    "    ADU_c = ADU[:-1] + np.diff(ADU)[0]/2 # center of bins\n",
+    "    \n",
+    "    p1 = plt.plot([],[],' ',label = f'({px},{py})')\n",
+    "    p2 = plt.scatter(ADU_c[h>0], h[h>0],marker = 'x',c = 'k', label = 'singles')\n",
+    "\n",
+    "    mdn = np.median(ADU_c[h>0])\n",
+    "    if ~np.isnan(mdn):\n",
+    "        p3 = plt.plot([mdn, mdn],[0,plt.gca().get_ylim()[1]],color='g', label = f'median={int(mdn)}')\n",
+    "    else:\n",
+    "        p3 = plt.plot([],[],' ', label = 'empty')\n",
+    "        \n",
+    "    try:\n",
+    "        coeff, _ = curve_fit(gauss, ADU_c, h, p0=[0, np.median(ADU_c[h>0]), singles_sig]) \n",
+    "        y_fit = gauss(ADU_c, *coeff)\n",
+    "        p4 = plt.plot(ADU_c, y_fit, label = f'fit: \\u03BC={int(np.round(coeff[1]))}')\n",
+    "\n",
+    "    except (RuntimeError, ValueError):\n",
+    "        p4 = plt.plot([],[],' ', label = 'fit error')\n",
+    "    \n",
+    "    plt.grid(ls=':')\n",
+    "    plt.xlabel('ADU')\n",
+    "    plt.xlim(ROI)\n",
+    "    plt.ylim(bottom=0)\n",
+    "    plt.legend()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a968c8df",
+   "metadata": {},
+   "source": [
+    "## Fit single photon peaks per pixel"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "49d52f2b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "step_timer.start()\n",
+    "peak_map = np.zeros(np.flip(sensor_size))[...,np.newaxis]\n",
+    "\n",
+    "for py in range(0,int(sensor_size[1])):\n",
+    "    for px in range(0,int(sensor_size[0])):            \n",
+    "        h,ADU = np.histogram(data_singles[:,py,px],bins=np.arange(ROI[0],ROI[1]))\n",
+    "        ADU_c = ADU[:-1] + np.diff(ADU)[0]/2 # center of bins\n",
+    "        \n",
+    "        if np.sum(h):\n",
+    "            if peak_fitting=='median':\n",
+    "                peak_map[py,px] = np.median(ADU_c[h>0])\n",
+    "            elif peak_fitting=='gauss':\n",
+    "                try:\n",
+    "                    coeff, _ = curve_fit(gauss, ADU_c, h, p0=[0, np.median(ADU_c[h>0]), singles_sig]) \n",
+    "                    peak_map[py,px] = coeff[1]\n",
+    "                except RuntimeError:\n",
+    "                    pass         # Failed fits remain 0 \n",
+    "        else:\n",
+    "            peak_map[py,px] = -1 # Assign -1 to empty pixels\n",
+    "\n",
+    "peak_map[np.isnan(peak_map)] = 0 # Failed fits can throw no expection but return nan coeffs\n",
+    "step_timer.done_step(f'Calculated relative gain map using {peak_fitting} fit. Elapsed Time')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8891bcd4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(7,5))\n",
+    "plt.hist(peak_map.flatten(),bins=np.arange(ROI[0],ROI[1]),\n",
+    "         histtype = 'bar',\n",
+    "         edgecolor='black',\n",
+    "         alpha = .5,\n",
+    "         linewidth=1.2);\n",
+    "\n",
+    "h,ADU = np.histogram(peak_map.flatten(),bins=np.arange(ROI[0],ROI[1]))\n",
+    "ADU_c = ADU[:-1] + np.diff(ADU)[0]/2 # center of bins\n",
+    "\n",
+    "coeff, _ = curve_fit(gauss, ADU_c, h, p0=[h.max()/2, singles_mu, singles_sig])\n",
+    "BP_fit_threshold = [coeff[1]-N_sigma_interval*abs(coeff[2]),\n",
+    "                    coeff[1]+N_sigma_interval*abs(coeff[2])]\n",
+    "y_fit = gauss(ADU_c, *coeff)\n",
+    "plt.plot(ADU_c,y_fit, label = f'fit: \\u03BC={int(np.round(coeff[1]))}')\n",
+    "plt.vlines(coeff[1],0,plt.gca().get_ylim()[1],color='orange',ls=':')\n",
+    "plt.axvspan(BP_fit_threshold[0],\n",
+    "            BP_fit_threshold[1],\n",
+    "            alpha = .3,\n",
+    "            color = 'orange',\n",
+    "            label = f'\\u03BC ± {N_sigma_interval}\\u03c3')\n",
+    "\n",
+    "plt.grid(ls=':')\n",
+    "plt.xlim(np.array(BP_fit_threshold)*[.9,1.1])\n",
+    "plt.xlabel('Peak position [ADU]',fontsize=12);\n",
+    "plt.legend(fontsize=12)\n",
+    "plt.title(f'{karabo_id} | {proposal} - r{run}', fontsize=12)\n",
+    "plt.ylim((1, coeff[0]*1.2))\n",
+    "plt.show()\n",
+    "\n",
+    "print('--------------------')\n",
+    "print('Fit parameters:')\n",
+    "print(f'  centroid = {np.round(coeff[1],3)}')\n",
+    "print(f'     sigma = {np.round(abs(coeff[2]),3)}')\n",
+    "print('---------------------')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e17b27ef",
+   "metadata": {},
+   "source": [
+    "## Flat-Field Bad Pixels"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0816af0f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "const_data['BadPixelsFF'] = np.zeros(np.flip(sensor_size))[...,np.newaxis]\n",
+    "\n",
+    "# Empty Pixels\n",
+    "const_data['BadPixelsFF'][peak_map==-1] = BadPixels.FF_NO_ENTRIES.value\n",
+    "\n",
+    "# Failed Fits\n",
+    "const_data['BadPixelsFF'][peak_map==0] = BadPixels.FF_GAIN_EVAL_ERROR.value\n",
+    "\n",
+    "# Gain out of range\n",
+    "const_data['BadPixelsFF'][(peak_map!=0) & (peak_map!=-1) & ((peak_map<BP_fit_threshold[0]) | (peak_map>BP_fit_threshold[1]))] = BadPixels.FF_GAIN_DEVIATION.value\n",
+    "\n",
+    "# Plot Bad Pixels Map\n",
+    "fig = xana.heatmapPlot(\n",
+    "    np.nan_to_num(np.log2(const_data['BadPixelsFF'].squeeze())+1, neginf=np.nan),\n",
+    "    cb_label='Bad pixel bit',\n",
+    "    x_label='Column',\n",
+    "    y_label='Row',\n",
+    ")\n",
+    "fig.suptitle(f'FF Bad Pixels Map({karabo_id} | {proposal} - r{run})', x=.5, y=.9, fontsize=16)\n",
+    "fig.set_size_inches(h=12, w=12)\n",
+    "\n",
+    "t = PrettyTable()\n",
+    "t.title = 'Flat-Field Bad Pixel Analysis'\n",
+    "t.field_names = ['Bit', 'Value', 'Type       ', 'Counts', '%']\n",
+    "t.align['Type       '] = 'r'\n",
+    "\n",
+    "for BP_type in [BadPixels.FF_GAIN_DEVIATION, BadPixels.FF_GAIN_EVAL_ERROR, BadPixels.FF_NO_ENTRIES]:\n",
+    "    t.add_row([BP_type.bit_length(),\n",
+    "               BP_type.value,\n",
+    "               BP_type.name,\n",
+    "               np.sum(const_data['BadPixelsFF']==BP_type.value),\n",
+    "               np.round(100*np.sum(const_data['BadPixelsFF']==BP_type.value)/np.prod(sensor_size),2)\n",
+    "              ])\n",
+    "t.add_row(['-','-',\n",
+    "           'Total',\n",
+    "           np.sum(const_data['BadPixelsFF']>0),\n",
+    "           np.round(100*np.sum(const_data['BadPixelsFF']>0)/np.prod(sensor_size),2)\n",
+    "          ])\n",
+    "print(t)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e97610e2",
+   "metadata": {},
+   "source": [
+    "## Relative Gain Map"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1ea03d36",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Replace FF bad pixels with mean peak value\n",
+    "peak_map[const_data['BadPixelsFF']>0] = np.nanmean(peak_map[const_data['BadPixelsFF']==0])\n",
+    "\n",
+    "# Calculate relative gain\n",
+    "rel_gain_map = 1/(peak_map.squeeze()/np.mean(peak_map))\n",
+    "\n",
+    "fig = xana.heatmapPlot(\n",
+    "    rel_gain_map,\n",
+    "    cb_label='Relative gain',\n",
+    "    x_label='Column',\n",
+    "    y_label='Row',\n",
+    "    vmin=np.floor(np.min(rel_gain_map)/.2)*.2, # force cb limits to be multiples of 0.2 \n",
+    "    vmax=np.ceil(np.max(rel_gain_map)/.2)*.2\n",
+    ")\n",
+    "fig.suptitle(f'Relative Gain Map ({karabo_id} | {proposal} - r{run})', x=.48, y=.9, fontsize=16)\n",
+    "fig.set_size_inches(h=12, w=12)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c2870edc",
+   "metadata": {},
+   "source": [
+    "## Absolute Gain Conversion Constant"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "282ad58a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "step_timer.start()\n",
+    "\n",
+    "# Correct data with calculated gain map\n",
+    "data_gain_corrected = data_corr*rel_gain_map\n",
+    "\n",
+    "h,ADU = np.histogram(data_gain_corrected.flatten(),\n",
+    "                     bins=np.arange(BP_fit_threshold[0],BP_fit_threshold[1]).astype(int))\n",
+    "ADU_c = ADU[:-1] + np.diff(ADU)[0]/2 # center of bins\n",
+    "\n",
+    "coeff, _ = curve_fit(gauss, ADU_c, h, p0=[h.max()/2, singles_mu, singles_sig])\n",
+    "y_fit = gauss(ADU_c, *coeff)\n",
+    "\n",
+    "gain_conv_const = coeff[1] / peak_energy\n",
+    "\n",
+    "abs_gain_map = rel_gain_map / gain_conv_const\n",
+    "\n",
+    "step_timer.done_step('Calculated Gain Conversion Constant. Elapsed Time')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3a0daabf",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(7,5))\n",
+    "\n",
+    "plt.scatter(ADU_c/gain_conv_const, h, color='k', marker='x', label='Gain Corrected')\n",
+    "plt.plot(ADU_c/gain_conv_const, y_fit, color='orange', label = f'fit: \\u03BC={(np.round(coeff[1],2))} ADU');\n",
+    "\n",
+    "plt.ylim(bottom=0)\n",
+    "plt.legend()\n",
+    "plt.grid(ls=':')\n",
+    "\n",
+    "plt.plot([peak_energy, peak_energy],[0,plt.gca().get_ylim()[1]],color='orange', ls = '--')\n",
+    "\n",
+    "ax1 = plt.gca()\n",
+    "ax2 = ax1.twiny()\n",
+    "ax2.set_xticks(ax1.get_xticks())\n",
+    "ax2.set_xbound(ax1.get_xbound())\n",
+    "ax2.set_xticklabels((ax1.get_xticks()*gain_conv_const).astype(int))\n",
+    "ax2.set_xlabel('ADU',fontsize=12)\n",
+    "ax1.set_xlabel('keV',fontsize=12)\n",
+    "\n",
+    "ax1.xaxis.label.set_color('red')\n",
+    "ax1.tick_params(axis='x', colors='red')\n",
+    "ax2.xaxis.label.set_color('blue')\n",
+    "ax2.tick_params(axis='x', colors='blue')\n",
+    "\n",
+    "plt.suptitle(f'Absolute Gain Conversion ({karabo_id} | {proposal} - r{run})',y =1.02,fontsize = 12)\n",
+    "plt.show()\n",
+    "\n",
+    "print(f'Gain conversion constant: {np.round(gain_conv_const,4)} ADU/keV')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c93fb9ac",
+   "metadata": {},
+   "source": [
+    "## Gain Map Validation\n",
+    "\n",
+    "Validation tests:\n",
+    "1. Inspect correlation between calculated gain map and gain map loaded from DB\n",
+    "2. Perform gain correction of current FF with calculated gain map and DB gain map and compare energy resolution and linearity"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8792ff72",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Retrieve DB RelativeGain Map\n",
+    "illum_condition_db = Conditions.Illuminated.ePix100(\n",
+    "    bias_voltage=bias_voltage,\n",
+    "    integration_time=integration_time,\n",
+    "    temperature=temperature_k,\n",
+    "    in_vacuum=in_vacuum,\n",
+    "    photon_energy=peak_energy\n",
+    ")\n",
+    "\n",
+    "db_gain_map = get_constant_from_db(\n",
+    "    karabo_id=karabo_id,\n",
+    "    karabo_da=karabo_da,\n",
+    "    constant=getattr(Constants.ePix100, 'RelativeGain')(),\n",
+    "    condition=illum_condition_db,\n",
+    "    empty_constant=None,\n",
+    "    cal_db_interface=cal_db_interface,\n",
+    "    creation_time=creation_time,\n",
+    "    timeout=cal_db_timeout\n",
+    ")\n",
+    "\n",
+    "if db_gain_map is None:\n",
+    "    print('Waring: No previous RelativeGain map was found for this detector conditions.')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1150be55",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if db_gain_map is not None:\n",
+    "    \n",
+    "    # Calculate gain conversion constant of DB gain map\n",
+    "    gain_conv_const_db = 1/np.median(db_gain_map[const_data['BadPixelsDark'].squeeze()>0])\n",
+    "    \n",
+    "    # Correlate new and DB gain maps\n",
+    "    plt.figure(figsize=(7,7))\n",
+    "\n",
+    "    plt.hist2d(db_gain_map.flatten(),\n",
+    "               abs_gain_map.flatten(),\n",
+    "               bins = 200,\n",
+    "               norm=LogNorm(),\n",
+    "              );\n",
+    "    plt.xlabel('DB noise map',fontsize=12)\n",
+    "    plt.ylabel('New noise map',fontsize=12)\n",
+    "\n",
+    "    plt.xlim(np.min([db_gain_map,abs_gain_map]),np.max([db_gain_map,abs_gain_map]))\n",
+    "    plt.ylim(np.min([db_gain_map,abs_gain_map]),np.max([db_gain_map,abs_gain_map]))\n",
+    "    plt.grid(ls=':')\n",
+    "\n",
+    "    rel_change = np.mean(abs(abs_gain_map-db_gain_map)/abs_gain_map)\n",
+    "    print(f'Average relative change of new gain map: {np.round(rel_change*100,3)} %')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e55aa651",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def correct_validation_train(worker_id, index, train_id, dc):\n",
+    "\n",
+    "    d = dc[pixels_src[0]][pixels_src[1]].astype(np.float32)\n",
+    "\n",
+    "    # Offset correction\n",
+    "    d -= const_data['Offset'].squeeze()\n",
+    "\n",
+    "    # Common Mode correction\n",
+    "    d = d.swapaxes(0,-1)\n",
+    "    d = cmCorrection_block.correct(d)\n",
+    "    d = cmCorrection_col.correct(d)\n",
+    "    d = cmCorrection_row.correct(d)\n",
+    "    d = d.swapaxes(0,-1)\n",
+    "\n",
+    "    # Relative Gain correction\n",
+    "    d_new_map = d*rel_gain_map\n",
+    "    if db_gain_map is not None:\n",
+    "        d_db_map  = d*db_gain_map*gain_conv_const_db\n",
+    "\n",
+    "    # Charge Sharing correction\n",
+    "    d, patterns = patternClassifier.classify(d.swapaxes(0,-1))\n",
+    "    FF_data[index] = d.swapaxes(0,-1) # no gain correction\n",
+    "    \n",
+    "    d_new_map, patterns = patternClassifier.classify(d_new_map.swapaxes(0,-1))\n",
+    "    FF_data_new_map[index] = d_new_map.swapaxes(0,-1) # gain correction with new gain map\n",
+    "    \n",
+    "    if db_gain_map is not None:\n",
+    "        d_db_map, patterns = patternClassifier.classify(d_db_map.swapaxes(0,-1))\n",
+    "        FF_data_db_map[index] = d_db_map.swapaxes(0,-1) # gain correction with DB gain map"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a1319015",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Correct validation trains\n",
+    "step_timer.start()\n",
+    "\n",
+    "N_validation_trains = 1000\n",
+    "\n",
+    "FF_data = psh.alloc(shape=(N_validation_trains,dshape[1],dshape[2]), dtype=np.float32)\n",
+    "FF_data_new_map = psh.alloc(shape=(N_validation_trains,dshape[1],dshape[2]), dtype=np.float32)\n",
+    "if db_gain_map is not None:\n",
+    "    FF_data_db_map = psh.alloc(shape=(N_validation_trains,dshape[1],dshape[2]), dtype=np.float32)\n",
+    "\n",
+    "psh.map(correct_validation_train, data_dc.select_trains(np.s_[:N_validation_trains]))\n",
+    "\n",
+    "step_timer.done_step('Corrected evaluation data. Elapsed Time')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "20f9faa5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Calculate histograms\n",
+    "bins_FF = np.arange(-50,800)\n",
+    "FF_hist_CS = np.histogram(FF_data[FF_data>0].flatten(),bins=bins_FF)[0]\n",
+    "\n",
+    "bins_keV_new = bins_FF/gain_conv_const\n",
+    "FF_hist_GC_new_map = np.histogram(FF_data_new_map/gain_conv_const,bins=bins_keV_new)[0]\n",
+    "\n",
+    "plt.figure(figsize=(12,8))\n",
+    "bins_ADU = bins_FF[:-1] + np.diff(bins_FF)[0]/2 # center of bins\n",
+    "bins_keV_new = bins_keV_new[:-1] + np.diff(bins_keV_new)[0]/2 # center of bins\n",
+    "plt.plot(bins_ADU,FF_hist_CS, color='black', label='Before gain correction')\n",
+    "plt.plot(bins_keV_new*gain_conv_const, FF_hist_GC_new_map, color='b', label='Gain correction with new map')\n",
+    "\n",
+    "if db_gain_map is not None:\n",
+    "    bins_keV_db = bins_FF/gain_conv_const_db\n",
+    "    FF_hist_GC_db_map = np.histogram(FF_data_db_map/gain_conv_const_db,bins=bins_keV_db)[0]\n",
+    "    bins_keV_db = bins_keV_db[:-1] + np.diff(bins_keV_db)[0]/2 # center of bins\n",
+    "    plt.plot(bins_keV_db*gain_conv_const_db, FF_hist_GC_db_map, color='r', label='Gain correction with DB map')\n",
+    "\n",
+    "plt.yscale('log')\n",
+    "plt.xlim(1,bins_FF[-1]+1)\n",
+    "\n",
+    "plt.xlabel('ADU',fontsize=12)\n",
+    "plt.legend(fontsize=12)\n",
+    "plt.title(f'{karabo_id} | {proposal} - r{run}', fontsize=14)\n",
+    "plt.grid(ls=':')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c35bddec",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "N_peaks = 4\n",
+    "sigma_tol = 2 # sigma tolerance to show in gauss fit\n",
+    "\n",
+    "# Ignore split events below primary energy threshold\n",
+    "E_cut = np.mean(const_data['Noise'])*split_evt_primary_threshold/gain_conv_const\n",
+    "\n",
+    "too_many_peaks = True\n",
+    "while too_many_peaks: # Iterate backwards on number of peaks until no exception is thrown\n",
+    "    try:\n",
+    "        FF_hist_AGC_new = FF_hist_GC_new_map/gain_conv_const\n",
+    "        if db_gain_map is not None:\n",
+    "            FF_hist_AGC_db = FF_hist_GC_db_map/gain_conv_const_db\n",
+    "        else:\n",
+    "            FF_hist_AGC_db=None\n",
+    "            bins_keV_db=None\n",
+    "\n",
+    "        E_res,rel_dev,abs_dev = [],[],[]\n",
+    "        colors = ['blue','red']\n",
+    "\n",
+    "        for FF_hist_AGC,bins_keV,leg in zip([FF_hist_AGC_new,FF_hist_AGC_db],[bins_keV_new,bins_keV_db],['new','DB']):\n",
+    "\n",
+    "            if FF_hist_AGC is None:\n",
+    "                continue\n",
+    "\n",
+    "            FF_hist_AGC = FF_hist_AGC[bins_keV>E_cut]\n",
+    "            FF_hist_AGC[0]=0\n",
+    "\n",
+    "            bins_keV = bins_keV[bins_keV>E_cut]\n",
+    "            c = colors[0]\n",
+    "            colors.pop(0)\n",
+    "\n",
+    "            fig = plt.figure(figsize=(12,6))\n",
+    "            plt.suptitle('Correction with '+leg+' gain map',fontsize = 15)\n",
+    "\n",
+    "            plt.fill(bins_keV,FF_hist_AGC, color='k',alpha=.2,label='Corrected data')\n",
+    "            plt.title(f'{karabo_id} | {proposal} - r{run}', fontsize=14)\n",
+    "\n",
+    "            ylim_top = plt.gca().get_ylim()[1]\n",
+    "\n",
+    "            ROI_shift = 0\n",
+    "            for p in range(1,N_peaks+1):\n",
+    "\n",
+    "                peak_ROI = np.array([p*peak_energy-peak_energy/2, p*peak_energy+peak_energy/2]) + ROI_shift\n",
+    "                xx = (bins_keV>peak_ROI[0]) & (bins_keV<peak_ROI[1])\n",
+    "\n",
+    "                coeff, _ = curve_fit(gauss, bins_keV[xx], FF_hist_AGC[xx], p0=[FF_hist_AGC[xx].max(), p*peak_energy, 1])\n",
+    "                y_fit = gauss(bins_keV[xx], *coeff)\n",
+    "\n",
+    "                xx_sigma_lim = (bins_keV>coeff[1]-abs(coeff[2])*sigma_tol) & (bins_keV<coeff[1]+abs(coeff[2])*sigma_tol)\n",
+    "\n",
+    "                plt.vlines(p*peak_energy,0,ylim_top,ls='-',color='grey',label=f'expected peaks')\n",
+    "                plt.fill_between(bins_keV[xx_sigma_lim],\n",
+    "                                 FF_hist_AGC[xx_sigma_lim],\n",
+    "                                 color='orange',\n",
+    "                                 alpha=.5,\n",
+    "                                 label=f'\\u03BC ± {sigma_tol}\\u03c3')\n",
+    "                plt.plot(bins_keV[xx],y_fit,color=c)\n",
+    "                plt.vlines(coeff[1],0,ylim_top,ls='--',color=c,label=f'peak {p}: {coeff[1]:,.2f} keV')\n",
+    "\n",
+    "                ROI_shift = coeff[1] - p*peak_energy   \n",
+    "\n",
+    "                E_res.append(abs(2*np.sqrt(2*np.log(2))*coeff[2]/coeff[1])*100)\n",
+    "                abs_dev.append(coeff[1]-peak_energy*p)\n",
+    "                rel_dev.append(abs(abs_dev[-1])/(peak_energy*p)*100)\n",
+    "\n",
+    "            plt.yscale('log')    \n",
+    "            plt.xlabel('keV',fontsize=12)\n",
+    "            plt.xlim(left=0)\n",
+    "            plt.ylim(.1,ylim_top)\n",
+    "\n",
+    "            # Remove repeated entries from legend\n",
+    "            handles, labels = plt.gca().get_legend_handles_labels()\n",
+    "            by_label = dict(zip(labels, handles))\n",
+    "            plt.legend(by_label.values(), by_label.keys())\n",
+    "            plt.grid(ls=':')\n",
+    "\n",
+    "            t = PrettyTable()\n",
+    "            t.field_names = ['Peak','Energy Resolution','Rel. Deviation','Abs. Deviation']\n",
+    "            t.title = f'{leg} gain map'\n",
+    "            for p in range(-N_peaks,0):\n",
+    "                t.add_row([f'#{p+N_peaks+1}: {peak_energy*(p+N_peaks+1):,.3f} keV',\n",
+    "                            f'{E_res[p]:,.2f} %', \n",
+    "                            f'{rel_dev[p]:,.2f} %',\n",
+    "                            f'{abs_dev[p]:,.2f} keV'])        \n",
+    "            print(t)\n",
+    "            \n",
+    "            too_many_peaks = False\n",
+    "            plt.show()\n",
+    "\n",
+    "    # throw exception if fit fails due to wrong estimate of number of peaks\n",
+    "    except RuntimeError: \n",
+    "        N_peaks -= 1\n",
+    "        plt.close(fig) # delete plots if exception was found due to wrong number of peaks"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "205f27b9",
+   "metadata": {},
+   "source": [
+    "## Linearity Analysis"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d6cf1264",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "peaks = np.arange(1,N_peaks+1)\n",
+    "plt.figure(figsize=(15,6))\n",
+    "plt.subplot(1,2,1)\n",
+    "plt.plot(peaks,[peak_energy*p for p in peaks], '-', c='k', label='expected')\n",
+    "plt.plot(peaks,[peak_energy*p for p in peaks]+np.array(abs_dev[:N_peaks]), 'o', c='b')\n",
+    "fit_coeffs= np.polyfit(peaks,[peak_energy*p for p in peaks]+np.array(abs_dev[:N_peaks]),1)\n",
+    "\n",
+    "plt.plot(peaks,fit_coeffs[0]*peaks+fit_coeffs[1], '--', c='b', label='New gain map')\n",
+    "str_theo  = f'$a_1$={peak_energy :,.4f}, $a_0$=0'\n",
+    "str_new = f'$a_1$={fit_coeffs[0]:,.4f}, $a_0$={fit_coeffs[1]:,.4f}'\n",
+    "plt.annotate(s=str_theo,xy=(.36,.94),xycoords='axes fraction',fontsize=11,bbox=dict(facecolor='k',alpha=.2,pad=1))\n",
+    "plt.annotate(s=str_new ,xy=(.36,.88),xycoords='axes fraction',fontsize=11,bbox=dict(facecolor='b',alpha=.2,pad=1))\n",
+    "\n",
+    "xx = np.arange(1,100,.1) # in photons\n",
+    "y_fit_new = fit_coeffs[0]*xx+fit_coeffs[1] # extrapolation for 100 photons\n",
+    "\n",
+    "plt.xticks(peaks)\n",
+    "plt.title(f'Linearity ({karabo_id} | {proposal} - r{run})')\n",
+    "plt.xlabel('# Photons')\n",
+    "plt.ylabel('Energy (keV)')\n",
+    "plt.legend(fontsize=12)\n",
+    "plt.grid(ls=':')\n",
+    "\n",
+    "plt.subplot(1,2,2)\n",
+    "dev_new = (y_fit_new-(peak_energy*xx))/(peak_energy*xx)*100\n",
+    "plt.plot(xx*peak_energy,dev_new,c='b', label='New gain map')\n",
+    "plt.xscale('log')\n",
+    "plt.xlim(right=100)\n",
+    "plt.xlabel('Energy (keV)')\n",
+    "plt.ylabel('Linearity Deviation (%)')\n",
+    "plt.title(f'Linearity extrapolation ({karabo_id} | {proposal} - r{run})')\n",
+    "plt.grid(ls=':',which='both')\n",
+    "\n",
+    "if db_gain_map is not None:\n",
+    "    plt.subplot(1,2,1)\n",
+    "    \n",
+    "    db_fit = np.polyfit(peaks,[peak_energy*p for p in peaks]+np.array(abs_dev[N_peaks:]),1)\n",
+    "    plt.plot(peaks,[peak_energy*p for p in peaks]+np.array(abs_dev[N_peaks:]), 'o', c='r')\n",
+    "    plt.plot(peaks,db_fit[0]*peaks+db_fit[1], '--', c='r', label='DB gain map')\n",
+    "    \n",
+    "    str_db  = f'$a_1$={db_fit[0] :,.4f}, $a_0$={db_fit[1] :,.4f}'\n",
+    "    y_fit_db = db_fit[0]*xx+db_fit[1] # extrapolation for 100 photons\n",
+    "    plt.annotate(s=str_db  ,xy=(.36,.82),xycoords='axes fraction',fontsize=11,bbox=dict(facecolor='r',alpha=.2,pad=1))\n",
+    "\n",
+    "    plt.subplot(1,2,2)\n",
+    "    dev_db = (y_fit_db-(peak_energy*xx))/(peak_energy*xx)*100\n",
+    "    plt.plot(xx*peak_energy,dev_db,c='r', label='DB gain map')\n",
+    "    plt.legend(fontsize=12)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "441e426a",
+   "metadata": {},
+   "source": [
+    "## Energy Resolution Analysis"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "feb7a5bf",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def power_function(x,*p):\n",
+    "    a,b,c = p\n",
+    "    return a*x**b + c\n",
+    "# rough initial estimate of fit parameters\n",
+    "fit_estimates = [20,-.5,0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "25b3f89a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Linearity of the visualized peaks\n",
+    "plt.figure(figsize=(8,6))\n",
+    "\n",
+    "xx = np.arange(0,50,.1)\n",
+    "if db_gain_map is not None:\n",
+    "    plt.plot(peaks*peak_energy,E_res[N_peaks:], 'o', c='r', label='DB gain map')\n",
+    "    coeff,_ = curve_fit(power_function,peaks*peak_energy,E_res[N_peaks:],p0=fit_estimates)\n",
+    "    power_fit = power_function(xx,*coeff)\n",
+    "    plt.plot(xx,power_fit, '--', c='r')\n",
+    "\n",
+    "plt.plot(peaks*peak_energy,E_res[:N_peaks], 'o', c='b', label='New gain map')\n",
+    "coeff,_ = curve_fit(power_function,peaks*peak_energy,E_res[:N_peaks],p0=fit_estimates)\n",
+    "power_fit = power_function(xx,*coeff)\n",
+    "plt.plot(xx,power_fit, '--', c='b')\n",
+    "\n",
+    "plt.title(f'Energy Resolution ({karabo_id} | {proposal} - r{run})')\n",
+    "plt.xlabel('Energy (keV)')\n",
+    "plt.ylabel('Energy Resolution (%)')\n",
+    "plt.legend(fontsize=12)\n",
+    "plt.xlim(1,np.ceil(xx[-1]))\n",
+    "plt.ylim(0,30)\n",
+    "plt.grid(ls=':')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f85f601d",
+   "metadata": {},
+   "source": [
+    "## Calibration Constants DB\n",
+    "Send the flat-field constants (RelativeGain and BadPixelsIlluminated) to the database and/or save them locally."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b898799f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Save constants to DB\n",
+    "\n",
+    "md = None\n",
+    "\n",
+    "constant_maps = {'RelativeGain': abs_gain_map,\n",
+    "                 'BadPixelsIlluminated': const_data['BadPixelsFF']\n",
+    "                } \n",
+    "\n",
+    "for const_name in constant_maps.keys():\n",
+    "    const = getattr(Constants.ePix100, const_name)()\n",
+    "    const.data = constant_maps[const_name].data\n",
+    "\n",
+    "    for parm in illum_condition_db.parameters:\n",
+    "        if parm.name == \"Sensor Temperature\":\n",
+    "            parm.lower_deviation = temp_limits\n",
+    "            parm.upper_deviation = temp_limits\n",
+    "\n",
+    "    # Get physical detector unit\n",
+    "    db_module = get_pdu_from_db(\n",
+    "        karabo_id=karabo_id,\n",
+    "        karabo_da=karabo_da,\n",
+    "        constant=const,\n",
+    "        condition=illum_condition_db,\n",
+    "        cal_db_interface=cal_db_interface,\n",
+    "        snapshot_at=creation_time)[0]\n",
+    "\n",
+    "    # Inject or save calibration constants\n",
+    "    if db_output:\n",
+    "        md = send_to_db(\n",
+    "            db_module=db_module,\n",
+    "            karabo_id=karabo_id,\n",
+    "            constant=const,\n",
+    "            condition=illum_condition_db,\n",
+    "            file_loc=file_loc,\n",
+    "            report_path=report,\n",
+    "            cal_db_interface=cal_db_interface,\n",
+    "            creation_time=creation_time,\n",
+    "            timeout=cal_db_timeout\n",
+    "        )\n",
+    "\n",
+    "    if local_output:\n",
+    "        Path(out_folder).mkdir(parents=True, exist_ok=True)\n",
+    "        md = save_const_to_h5(\n",
+    "            db_module=db_module,\n",
+    "            karabo_id=karabo_id,\n",
+    "            constant=const,\n",
+    "            condition=illum_condition_db,\n",
+    "            data=const.data,\n",
+    "            file_loc=file_loc,\n",
+    "            report=report,\n",
+    "            creation_time=creation_time,\n",
+    "            out_folder=out_folder\n",
+    "        )\n",
+    "        print(f\"Calibration constant {const_name} is stored locally at {out_folder} \\n\")\n",
+    "\n",
+    "print(\"Constants parameter conditions are:\\n\"\n",
+    "      f\"• Bias voltage: {bias_voltage}\\n\"\n",
+    "      f\"• Integration time: {integration_time}\\n\"\n",
+    "      f\"• Temperature: {temperature_k}\\n\"\n",
+    "      f\"• Source Energy: {peak_energy}\\n\"      \n",
+    "      f\"• In Vacuum: {in_vacuum}\\n\"\n",
+    "      f\"• Creation time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "cal_venv",
+   "language": "python",
+   "name": "cal_venv"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/ePix100/Correction_ePix100_NBC.ipynb b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
index 499a0d2cf135bcb39648862220f5ad4fd7ba0c8b..b7db6722739c7d81852329e296cacf48005a8221 100644
--- a/notebooks/ePix100/Correction_ePix100_NBC.ipynb
+++ b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
@@ -24,12 +24,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/HED/202202/p003121/raw\" # input folder, required\n",
+    "in_folder = \"/gpfs/exfel/exp/HED/202102/p002739/raw\" # input folder, required\n",
     "out_folder = \"\"  # output folder, required\n",
     "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
     "sequences = [-1]  # sequences to correct, set to -1 for all, range allowed\n",
     "sequences_per_node = 1  # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n",
-    "run = 156  # which run to read data from, required\n",
+    "run = 38  # which run to read data from, required\n",
     "\n",
     "# Parameters for accessing the raw data.\n",
     "karabo_id = \"HED_IA1_EPX100-1\"  # karabo karabo_id\n",
@@ -44,7 +44,7 @@
     "limit_trains = 0  # Process only first N images, 0 - process all.\n",
     "\n",
     "# Parameters for the calibration database.\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\"  # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8025\"  # calibration DB interface to use\n",
     "cal_db_timeout = 300000  # timeout on caldb requests\n",
     "creation_time = \"\"  # The timestamp to use with Calibration DBe. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
     "\n",
@@ -90,20 +90,20 @@
     "import pasha as psh\n",
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
-    "from IPython.display import Latex, display\n",
+    "from IPython.display import Latex, Markdown, display\n",
     "from extra_data import RunDirectory, H5File\n",
+    "from extra_geom import Epix100Geometry\n",
+    "from mpl_toolkits.axes_grid1 import make_axes_locatable\n",
     "from pathlib import Path\n",
     "\n",
     "import cal_tools.restful_config as rest_cfg\n",
-    "from XFELDetAna import xfelpyanatools as xana\n",
     "from XFELDetAna import xfelpycaltools as xcal\n",
-    "from cal_tools.calcat_interface import EPIX100_CalibrationData\n",
+    "from cal_tools.calcat_interface import EPIX100_CalibrationData, CalCatError\n",
     "from cal_tools.epix100 import epix100lib\n",
     "from cal_tools.files import DataFile\n",
-    "from cal_tools.restful_config import restful_config\n",
     "from cal_tools.tools import (\n",
     "    calcat_creation_time,\n",
-    "    CalibrationMetadata,\n",
+    "    write_constants_fragment,\n",
     ")\n",
     "from cal_tools.step_timing import StepTimer\n",
     "\n",
@@ -111,6 +111,7 @@
     "\n",
     "prettyPlotting = True\n",
     "\n",
+    "\n",
     "%matplotlib inline"
    ]
   },
@@ -135,6 +136,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "prop_str = in_folder[in_folder.find('/p')+1:in_folder.find('/p')+8]\n",
+    "\n",
     "in_folder = Path(in_folder)\n",
     "out_folder = Path(out_folder)\n",
     "\n",
@@ -157,12 +160,7 @@
    "outputs": [],
    "source": [
     "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
-    "print(f\"Using {creation_time.isoformat()} as creation time\")\n",
-    "\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# Constant paths are saved under retrieved-constants in calibration_metadata.yml.\n",
-    "# NOTE: this notebook shouldn't overwrite calibration metadata file.\n",
-    "const_yaml = metadata.get(\"retrieved-constants\", {})"
+    "print(f\"Using {creation_time.isoformat()} as creation time\")"
    ]
   },
   {
@@ -270,34 +268,31 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "constant_names = [\"OffsetEPix100\", \"NoiseEPix100\"]\n",
+    "epix_cal = EPIX100_CalibrationData(\n",
+    "    detector_name=karabo_id,\n",
+    "    sensor_bias_voltage=bias_voltage,\n",
+    "    integration_time=integration_time,\n",
+    "    sensor_temperature=temperature_k,\n",
+    "    in_vacuum=in_vacuum,\n",
+    "    source_energy=gain_photon_energy,\n",
+    "    event_at=creation_time,\n",
+    "    client=rest_cfg.calibration_client(),\n",
+    ")\n",
+    "\n",
+    "const_metadata = epix_cal.metadata(calibrations=epix_cal.dark_calibrations)\n",
+    "\n",
     "if relative_gain:\n",
-    "    constant_names += [\"RelativeGainEPix100\"]\n",
-    "\n",
-    "const_data = dict()\n",
-    "\n",
-    "if const_yaml:  #  Used while reproducing corrected data.\n",
-    "    print(f\"Using stored constants in {metadata.filename}\")\n",
-    "    when = dict()\n",
-    "    for cname, mdata in const_yaml[karabo_da][\"constants\"].items():\n",
-    "        const_data[cname] = dict()\n",
-    "        when[cname] = mdata[\"creation-time\"]\n",
-    "        if when[cname]:\n",
-    "            with h5py.File(mdata[\"path\"], \"r\") as cf:\n",
-    "                const_data[cname] = np.copy(\n",
-    "                    cf[f\"{mdata['dataset']}/data\"])\n",
-    "else:\n",
-    "    epix_cal = EPIX100_CalibrationData(\n",
-    "        detector_name=karabo_id,\n",
-    "        sensor_bias_voltage=bias_voltage,\n",
-    "        integration_time=integration_time,\n",
-    "        sensor_temperature=temperature_k,\n",
-    "        in_vacuum=in_vacuum,\n",
-    "        source_energy=gain_photon_energy,\n",
-    "        event_at=creation_time,\n",
-    "        client=rest_cfg.calibration_client(),\n",
-    "    )\n",
-    "    const_data = epix_cal.ndarray_map(calibrations=constant_names)[karabo_da]\n",
+    "    try:\n",
+    "        metadata = epix_cal.metadata(epix_cal.illuminated_calibrations)\n",
+    "        for key, value in metadata.items():\n",
+    "            const_metadata.setdefault(key, {}).update(value)\n",
+    "    except CalCatError as e:\n",
+    "        warning(f\"CalCatError: {e}\")\n",
+    "\n",
+    "# Display retrieved calibration constants timestamps\n",
+    "epix_cal.display_markdown_retrieved_constants(metadata=const_metadata)\n",
+    "# Load the constant data from files\n",
+    "const_data = epix_cal.ndarray_map(metadata=const_metadata)[karabo_da]\n",
     "\n",
     "# Validate the constants availability and raise/warn correspondingly. \n",
     "missing_dark_constants = {\"OffsetEPix100\", \"NoiseEPix100\"} - set(const_data)\n",
@@ -318,12 +313,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Initializing some parameters.\n",
-    "hscale = 1\n",
-    "stats = True\n",
-    "hrange = np.array([-50, 1000])\n",
-    "nbins = hrange[1] - hrange[0]\n",
-    "commonModeBlockSize = [x//2, y//2]"
+    "# Record constant details in YAML metadata\n",
+    "write_constants_fragment(\n",
+    "    out_folder=(metadata_folder or out_folder),\n",
+    "    det_metadata=const_metadata,\n",
+    "    caldb_root=epix_cal.caldb_root,\n",
+    "    )"
    ]
   },
   {
@@ -332,24 +327,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "histCalOffsetCor = xcal.HistogramCalculator(\n",
-    "    sensorSize,\n",
-    "    bins=nbins,\n",
-    "    range=hrange,\n",
-    "    parallel=run_parallel,\n",
-    "    nCells=memoryCells,\n",
-    "    blockSize=blockSize\n",
-    ")\n",
-    "\n",
-    "# *****************Histogram Calculators****************** #\n",
-    "histCalCor = xcal.HistogramCalculator(\n",
-    "    sensorSize,\n",
-    "    bins=1050,\n",
-    "    range=[-50, 1000],\n",
-    "    parallel=run_parallel,\n",
-    "    nCells=memoryCells,\n",
-    "    blockSize=blockSize\n",
-    ")"
+    "# Initializing some parameters.\n",
+    "hscale = 1\n",
+    "stats = True\n",
+    "bins = np.arange(-50,1000)\n",
+    "hist = {'O': 0} # dictionary to store histograms"
    ]
   },
   {
@@ -359,14 +341,9 @@
    "outputs": [],
    "source": [
     "if common_mode:\n",
-    "    histCalCMCor = xcal.HistogramCalculator(\n",
-    "        sensorSize,\n",
-    "        bins=nbins,\n",
-    "        range=hrange,\n",
-    "        parallel=run_parallel,\n",
-    "        nCells=memoryCells,\n",
-    "        blockSize=blockSize,\n",
-    "    )\n",
+    "    \n",
+    "    commonModeBlockSize = [x//2, y//8]\n",
+    "    \n",
     "    cmCorrectionB = xcal.CommonModeCorrection(\n",
     "        shape=sensorSize,\n",
     "        blockSize=commonModeBlockSize, \n",
@@ -402,7 +379,9 @@
     "        stats=stats,\n",
     "        minFrac=cm_min_frac,\n",
     "        noiseSigma=cm_noise_sigma,\n",
-    "    )"
+    "    )\n",
+    "    \n",
+    "    hist['CM'] = 0"
    ]
   },
   {
@@ -412,12 +391,11 @@
    "outputs": [],
    "source": [
     "if relative_gain:\n",
-    "    gain_cnst = np.median(const_data[\"RelativeGainEPix100\"])\n",
-    "    hscale = gain_cnst\n",
-    "    plot_unit = 'keV'\n",
-    "    if photon_energy > 0:\n",
-    "        plot_unit = '$\\gamma$'\n",
-    "        hscale /= photon_energy\n",
+    "    \n",
+    "    # gain constant is given by the mode of the gain map \n",
+    "    # because all bad pixels are masked using this value\n",
+    "    _vals,_counts = np.unique(const_data[\"RelativeGainEPix100\"], return_counts=True)\n",
+    "    gain_cnst = _vals[np.argmax(_counts)] \n",
     "    \n",
     "    gainCorrection = xcal.RelativeGainCorrection(\n",
     "        sensorSize,\n",
@@ -427,25 +405,16 @@
     "        blockSize=blockSize,\n",
     "        gains=None,\n",
     "    )\n",
-    "\n",
-    "    histCalRelGainCor = xcal.HistogramCalculator(\n",
-    "        sensorSize,\n",
-    "        bins=nbins,\n",
-    "        range=hrange,\n",
-    "        parallel=run_parallel,\n",
-    "        nCells=memoryCells,\n",
-    "        blockSize=blockSize\n",
-    "    )\n",
+    "    \n",
+    "    hist['RG'] = 0\n",
     "\n",
     "    if absolute_gain:\n",
-    "        histCalAbsGainCor = xcal.HistogramCalculator(\n",
-    "            sensorSize,\n",
-    "            bins=nbins,\n",
-    "            range=hrange*hscale,\n",
-    "            parallel=run_parallel,\n",
-    "            nCells=memoryCells,\n",
-    "            blockSize=blockSize\n",
-    "        )"
+    "        hscale = gain_cnst\n",
+    "        plot_unit = 'keV'\n",
+    "        if photon_energy > 0:\n",
+    "            plot_unit = '$\\gamma$'\n",
+    "            hscale /= photon_energy\n",
+    "        hist['AG'] = 0"
    ]
   },
   {
@@ -467,30 +436,8 @@
     "        blockSize=[x, y],\n",
     "        parallel=run_parallel,\n",
     "    )\n",
-    "    histCalCSCor = xcal.HistogramCalculator(\n",
-    "        sensorSize,\n",
-    "        bins=nbins,\n",
-    "        range=hrange,\n",
-    "        parallel=run_parallel,\n",
-    "        nCells=memoryCells,\n",
-    "        blockSize=blockSize,\n",
-    "    )\n",
-    "    histCalGainCorClusters = xcal.HistogramCalculator(\n",
-    "        sensorSize,\n",
-    "        bins=nbins,\n",
-    "        range=hrange*hscale,\n",
-    "        parallel=run_parallel,\n",
-    "        nCells=memoryCells,\n",
-    "        blockSize=blockSize\n",
-    "    )\n",
-    "    histCalGainCorSingles = xcal.HistogramCalculator(\n",
-    "        sensorSize,\n",
-    "        bins=nbins,\n",
-    "        range=hrange*hscale,\n",
-    "        parallel=run_parallel,\n",
-    "        nCells=memoryCells,\n",
-    "        blockSize=blockSize\n",
-    "    )"
+    "    hist['CS'] = 0\n",
+    "    hist['S'] = 0"
    ]
   },
   {
@@ -511,11 +458,11 @@
     "    d = d[..., np.newaxis].astype(np.float32)\n",
     "    d = np.compress(\n",
     "        np.any(d > 0, axis=(0, 1)), d, axis=2)\n",
-    "    \n",
+    "\n",
     "    # Offset correction.\n",
     "    d -= const_data[\"OffsetEPix100\"]\n",
+    "    hist['O'] += np.histogram(d,bins=bins)[0]\n",
     "\n",
-    "    histCalOffsetCor.fill(d)\n",
     "    # Common Mode correction.\n",
     "    if common_mode:\n",
     "        # Block CM\n",
@@ -524,12 +471,15 @@
     "        d = cmCorrectionR.correct(d)\n",
     "        # COL CM\n",
     "        d = cmCorrectionC.correct(d)\n",
-    "        histCalCMCor.fill(d)\n",
     "\n",
-    "    # relative gain correction.\n",
+    "        hist['CM'] += np.histogram(d,bins=bins)[0]\n",
+    "\n",
+    "\n",
+    "    # Relative gain correction.\n",
     "    if relative_gain:\n",
     "        d = gainCorrection.correct(d)\n",
-    "        histCalRelGainCor.fill(d)\n",
+    "\n",
+    "        hist['RG'] += np.histogram(d,bins=bins)[0]\n",
     "\n",
     "    \"\"\"The gain correction is currently applying\n",
     "    an absolute correction (not a relative correction\n",
@@ -552,34 +502,31 @@
     "        data_clu[index, ...] = np.squeeze(d_clu)\n",
     "        data_patterns[index, ...] = np.squeeze(patterns)\n",
     "\n",
-    "        histCalCSCor.fill(d_clu)\n",
+    "        hist['CS'] += np.histogram(d_clu,bins=bins)[0]\n",
+    "\n",
+    "        d_sing = d_clu[patterns==100] # pattern 100 corresponds to single photons events\n",
+    "        if len(d_sing):\n",
+    "            hist['S'] += np.histogram(d_sing,bins=bins)[0]\n",
     "\n",
-    "    # absolute gain correction\n",
+    "    # Absolute gain correction\n",
     "    # changes data from ADU to keV (or n. of photons)\n",
     "    if absolute_gain:\n",
     "\n",
     "        d = d * gain_cnst\n",
     "        if photon_energy > 0:\n",
     "            d /= photon_energy\n",
-    "        histCalAbsGainCor.fill(d)\n",
+    "\n",
+    "        hist['AG'] += np.histogram(d,bins=bins)[0]\n",
     "\n",
     "        if pattern_classification:\n",
     "            # Modify pattern classification.\n",
     "            d_clu = d_clu * gain_cnst\n",
-    "            \n",
     "            if photon_energy > 0:\n",
     "                d_clu /= photon_energy\n",
     "\n",
     "            data_clu[index, ...] = np.squeeze(d_clu)\n",
     "\n",
-    "            histCalGainCorClusters.fill(d_clu)\n",
-    "            \n",
-    "            d_sing = d_clu[patterns==100] # pattern 100 corresponds to single photons events\n",
-    "            if len(d_sing):\n",
-    "                histCalGainCorSingles.fill(d_sing)\n",
-    "\n",
-    "    data[index, ...] = np.squeeze(d)\n",
-    "    histCalCor.fill(d)"
+    "    data[index, ...] = np.squeeze(d)"
    ]
   },
   {
@@ -670,19 +617,6 @@
     "        # Create count/first datasets at INDEX source.\n",
     "        outp_source.create_index(data=image_counts)\n",
     "\n",
-    "        # Store uncorrected RAW image datasets for the corrected trains.\n",
-    "\n",
-    "        data_raw_fields = [  # /data/\n",
-    "            \"ambTemp\", \"analogCurr\", \"analogInputVolt\", \"backTemp\",\n",
-    "            \"digitalInputVolt\", \"guardCurr\", \"relHumidity\", \"digitalCurr\"\n",
-    "        ]\n",
-    "        for field in data_raw_fields:\n",
-    "            field_arr = seq_dc[instrument_src, f\"data.{field}\"].ndarray()\n",
-    "\n",
-    "            outp_source.create_key(\n",
-    "                f\"data.{field}\", data=field_arr,\n",
-    "                chunks=(chunk_size_idim, *field_arr.shape[1:]))\n",
-    "\n",
     "        image_raw_fields = [  # /data/image/\n",
     "            \"binning\", \"bitsPerPixel\", \"dimTypes\", \"dims\",\n",
     "            \"encoding\", \"flipX\", \"flipY\", \"roiOffsets\", \"rotation\",\n",
@@ -699,6 +633,11 @@
     "            \"data.image.pixels\", data=data, chunks=dataset_chunk)\n",
     "        outp_source.create_key(\n",
     "            \"data.trainId\", data=seq_dc.train_ids, chunks=min(50, len(seq_dc.train_ids)))\n",
+    "        \n",
+    "        if np.isin('data.pulseId', list(seq_dc[instrument_src].keys())): # some runs are missing 'data.pulseId'\n",
+    "            outp_source.create_key(\n",
+    "                \"data.pulseId\", data=list(seq_dc[instrument_src]['data.pulseId'].ndarray().squeeze()), chunks=min(50, len(seq_dc.train_ids)))\n",
+    "        \n",
     "        if pattern_classification:\n",
     "            # Add main corrected `data.image.pixels` dataset and store corrected data.\n",
     "            outp_source.create_key(\n",
@@ -712,80 +651,52 @@
     "    exit(0)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Plot Histograms"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "bins_ADU = bins[:-1]+np.diff(bins)[0]/2\n",
+    "bins_keV = bins_ADU*hscale"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "ho, eo, co, so = histCalCor.get()\n",
-    "\n",
-    "d = [{\n",
-    "    'x': co,\n",
-    "    'y': ho,\n",
-    "    'y_err': np.sqrt(ho[:]),\n",
-    "    'drawstyle': 'steps-mid',\n",
-    "    'errorstyle': 'bars',\n",
-    "    'errorcoarsing': 2,\n",
-    "    'label': 'Total corr.'\n",
-    "}]\n",
-    "\n",
-    "ho, eo, co, so = histCalOffsetCor.get()\n",
-    "\n",
-    "d.append({\n",
-    "    'x': co,\n",
-    "    'y': ho,\n",
-    "    'y_err': np.sqrt(ho[:]),\n",
-    "    'drawstyle': 'steps-mid',\n",
-    "    'errorstyle': 'bars',\n",
-    "    'errorcoarsing': 2,\n",
-    "    'label': 'Offset corr.'\n",
-    "})\n",
+    "# Histogram in ADU\n",
     "\n",
-    "if common_mode:\n",
-    "    ho, eo, co, so = histCalCMCor.get()\n",
-    "    d.append({\n",
-    "        'x': co,\n",
-    "        'y': ho,\n",
-    "        'y_err': np.sqrt(ho[:]),\n",
-    "        'drawstyle': 'steps-mid',\n",
-    "        'errorstyle': 'bars',\n",
-    "        'errorcoarsing': 2,\n",
-    "        'label': 'CM corr.'\n",
-    "    })\n",
-    "    \n",
-    "if relative_gain :\n",
-    "    ho, eo, co, so = histCalRelGainCor.get()\n",
-    "    d.append({\n",
-    "        'x': co,\n",
-    "        'y': ho,\n",
-    "        'y_err': np.sqrt(ho[:]),\n",
-    "        'drawstyle': 'steps-mid',\n",
-    "        'errorstyle': 'bars',\n",
-    "        'errorcoarsing': 2,\n",
-    "        'label': 'Relative gain corr.'\n",
-    "    })\n",
+    "plt.figure(figsize=(12,8))\n",
+    "plt.plot(bins_ADU,hist['O'], label='Offset corr')\n",
     "\n",
+    "if common_mode:\n",
+    "    plt.plot(bins_ADU,hist['CM'], label='CM corr')\n",
+    "if relative_gain:\n",
+    "    plt.plot(bins_ADU,hist['RG'], label='Relative Gain corr')\n",
     "if pattern_classification:\n",
-    "    ho, eo, co, so = histCalCSCor.get()\n",
-    "    d.append({\n",
-    "        'x': co,\n",
-    "        'y': ho,\n",
-    "        'y_err': np.sqrt(ho[:]),\n",
-    "        'drawstyle': 'steps-mid',\n",
-    "        'errorstyle': 'bars',\n",
-    "        'errorcoarsing': 2,\n",
-    "        'label': 'Charge sharing corr.'\n",
-    "    })\n",
-    "\n",
-    "fig = xana.simplePlot(\n",
-    "    d, aspect=1, x_label=f'Energy (ADU)',\n",
-    "    y_label='Number of occurrences', figsize='2col',\n",
-    "    y_log=True, x_range=(-50, 500),\n",
-    "    legend='top-center-frame-2col',\n",
-    ")\n",
-    "plt.title(f'run {run} - {karabo_da}')\n",
-    "plt.grid()"
+    "    plt.plot(bins_ADU[bins_ADU>10],hist['CS'][bins_ADU>10], label='Charge Sharing corr')\n",
+    "    if np.any(hist['S']):\n",
+    "        plt.plot(bins_ADU,hist['S'], label='Singles')\n",
+    "\n",
+    "xtick_step = 50\n",
+    "plt.xlim(bins[0], bins[-1]+1)\n",
+    "plt.xticks(np.arange(bins[0],bins[-1]+2,xtick_step))\n",
+    "plt.xlabel('ADU',fontsize=12)\n",
+    "\n",
+    "plt.yscale('log')\n",
+    "plt.title(f'{karabo_id} | {prop_str}, r{run}', fontsize=14, fontweight='bold')\n",
+    "plt.legend(fontsize=12)\n",
+    "plt.grid(ls=':')"
    ]
   },
   {
@@ -794,51 +705,30 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "if absolute_gain :\n",
-    "    d=[]\n",
-    "    ho, eo, co, so = histCalAbsGainCor.get()\n",
-    "    d.append({\n",
-    "        'x': co,\n",
-    "        'y': ho,\n",
-    "        'y_err': np.sqrt(ho[:]),\n",
-    "        'drawstyle': 'steps-mid',\n",
-    "        'errorstyle': 'bars',\n",
-    "        'errorcoarsing': 2,\n",
-    "        'label': 'Absolute gain corr.'\n",
-    "    })\n",
+    "# Histogram in keV/number of photons\n",
+    "\n",
+    "if absolute_gain:\n",
+    "    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n",
+    "    plt.figure(figsize=(12,8))\n",
     "\n",
+    "    if relative_gain:\n",
+    "        plt.plot(bins_keV,hist['RG'], label='Absolute Gain corr', c=colors[2])\n",
+    "    \n",
     "    if pattern_classification:\n",
-    "        ho, eo, co, so = histCalGainCorClusters.get()\n",
-    "        d.append({\n",
-    "            'x': co,\n",
-    "            'y': ho,\n",
-    "            'y_err': np.sqrt(ho[:]),\n",
-    "            'drawstyle': 'steps-mid',\n",
-    "            'errorstyle': 'bars',\n",
-    "            'errorcoarsing': 2,\n",
-    "            'label': 'Charge sharing corr.'\n",
-    "        })\n",
-    "        \n",
-    "        ho, eo, co, so = histCalGainCorSingles.get()\n",
-    "        d.append({\n",
-    "            'x': co,\n",
-    "            'y': ho,\n",
-    "            'y_err': np.sqrt(ho[:]),\n",
-    "            'drawstyle': 'steps-mid',\n",
-    "            'errorstyle': 'bars',\n",
-    "            'errorcoarsing': 2,\n",
-    "            'label': 'Isolated photons (singles)'\n",
-    "        })\n",
-    "        \n",
-    "    fig = xana.simplePlot(\n",
-    "        d, aspect=1, x_label=f'Energy ({plot_unit})',\n",
-    "        y_label='Number of occurrences', figsize='2col',\n",
-    "        y_log=True, \n",
-    "        x_range=np.array((-50, 500))*hscale,\n",
-    "        legend='top-center-frame-2col',\n",
-    "    )\n",
-    "    plt.grid()\n",
-    "    plt.title(f'run {run} - {karabo_da}')"
+    "        plt.plot(bins_keV[bins_keV>.5],hist['CS'][bins_keV>.5], label='Charge Sharing corr', c=colors[3])\n",
+    "        if np.any(hist['S']):\n",
+    "            plt.plot(bins_keV[bins_keV>.5],hist['S'][bins_keV>.5], label='Singles', c=colors[4])\n",
+    "    \n",
+    "    if photon_energy==0: # if keV instead of #photons\n",
+    "        xtick_step = 5\n",
+    "        plt.xlim(left=-2)\n",
+    "        plt.xticks(np.arange(0,plt.gca().get_xlim()[1],xtick_step))\n",
+    "    plt.xlabel(plot_unit,fontsize=12)\n",
+    "\n",
+    "    plt.yscale('log')\n",
+    "    plt.title(f'{karabo_id} | {prop_str}, r{run}', fontsize=14, fontweight='bold')\n",
+    "    plt.legend(fontsize=12)\n",
+    "    plt.grid(ls=':')"
    ]
   },
   {
@@ -854,15 +744,48 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "step_timer.start()\n",
-    "fig = xana.heatmapPlot(\n",
-    "    np.nanmedian(data, axis=0),\n",
-    "    x_label='Columns', y_label='Rows',\n",
-    "    lut_label=f'Signal ({plot_unit})',\n",
-    "    x_range=(0, y),\n",
-    "    y_range=(0, x),\n",
-    "    vmin=-50, vmax=50)\n",
-    "step_timer.done_step(f'Plotting mean image of {data.shape[0]} trains.')"
+    "geom = Epix100Geometry.from_relative_positions(top=[386.5, 364.5, 0.], bottom=[386.5, -12.5, 0.])\n",
+    "\n",
+    "if pattern_classification:\n",
+    "    plt.subplots(1,2,figsize=(18,18)) if pattern_classification else plt.subplots(1,1,figsize=(9,9))\n",
+    "    ax = plt.subplot(1,2,1)\n",
+    "    ax.set_title(f'Before CS correction',fontsize=12,fontweight='bold');\n",
+    "else:\n",
+    "    plt.subplots(1,1,figsize=(9,9))\n",
+    "    ax = plt.subplot(1,1,1)\n",
+    "    ax.set_title(f'{karabo_id} | {prop_str}, r{run} | Average of {data.shape[0]} trains',fontsize=12,fontweight='bold');\n",
+    "    \n",
+    "# Average image before charge sharing corrcetion\n",
+    "divider = make_axes_locatable(ax)\n",
+    "cax = divider.append_axes('bottom', size='5%', pad=0.5)\n",
+    "\n",
+    "image = data.mean(axis=0)\n",
+    "vmin = max(image.mean()-2*image.std(),0)\n",
+    "vmax = image.mean()+3*image.std()\n",
+    "geom.plot_data(image,\n",
+    "               ax=ax,\n",
+    "               colorbar={'cax': cax, 'label': plot_unit, 'orientation': 'horizontal'},\n",
+    "               origin='upper',\n",
+    "               vmin=vmin,\n",
+    "               vmax=vmax)\n",
+    "\n",
+    "# Average image after charge sharing corrcetion\n",
+    "if pattern_classification:\n",
+    "\n",
+    "    ax = plt.subplot(1,2,2)\n",
+    "    divider = make_axes_locatable(ax)\n",
+    "    cax = divider.append_axes('bottom', size='5%', pad=0.5)\n",
+    "\n",
+    "    image = data_clu.mean(axis=0)\n",
+    "    geom.plot_data(image,\n",
+    "                   ax=ax,\n",
+    "                   colorbar={'cax': cax, 'label': plot_unit, 'orientation': 'horizontal'},\n",
+    "                   origin='upper',\n",
+    "                   vmin=vmin,\n",
+    "                   vmax=vmax)\n",
+    "    ax.set_title(f'After CS correction',fontsize=12,fontweight='bold');\n",
+    "\n",
+    "    plt.suptitle(f'{karabo_id} | {prop_str}, r{run} | Average of {data.shape[0]} trains',fontsize=14,fontweight='bold',y=.72);"
    ]
   },
   {
@@ -878,23 +801,56 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "step_timer.start()\n",
-    "fig = xana.heatmapPlot(\n",
-    "    data[0, ...],\n",
-    "    x_label='Columns', y_label='Rows',\n",
-    "    lut_label=f'Signal ({plot_unit})',\n",
-    "    x_range=(0, y),\n",
-    "    y_range=(0, x),\n",
-    "    vmin=-50, vmax=50)\n",
-    "step_timer.done_step(f'Plotting single shot of corrected data.')"
+    "train_idx = -1\n",
+    "\n",
+    "if pattern_classification:\n",
+    "    plt.subplots(1,2,figsize=(18,18)) if pattern_classification else plt.subplots(1,1,figsize=(9,9))\n",
+    "    ax = plt.subplot(1,2,1)\n",
+    "    ax.set_title(f'Before CS correction',fontsize=12,fontweight='bold');\n",
+    "else:\n",
+    "    plt.subplots(1,1,figsize=(9,9))\n",
+    "    ax = plt.subplot(1,1,1)\n",
+    "    ax.set_title(f'{karabo_id} | {prop_str}, r{run} | Single frame',fontsize=12,fontweight='bold');\n",
+    "    \n",
+    "# Average image before charge sharing corrcetion\n",
+    "divider = make_axes_locatable(ax)\n",
+    "cax = divider.append_axes('bottom', size='5%', pad=0.5)\n",
+    "\n",
+    "image = data[train_idx]\n",
+    "vmin = max(image.mean()-2*image.std(),0)\n",
+    "vmax = image.mean()+3*image.std()\n",
+    "geom.plot_data(image,\n",
+    "               ax=ax,\n",
+    "               colorbar={'cax': cax, 'label': plot_unit, 'orientation': 'horizontal'},\n",
+    "               origin='upper',\n",
+    "               vmin=vmin,\n",
+    "               vmax=vmax)\n",
+    "\n",
+    "# Average image after charge sharing corrcetion\n",
+    "if pattern_classification:\n",
+    "\n",
+    "    ax = plt.subplot(1,2,2)\n",
+    "    divider = make_axes_locatable(ax)\n",
+    "    cax = divider.append_axes('bottom', size='5%', pad=0.5)\n",
+    "\n",
+    "    image = data_clu[train_idx]\n",
+    "    geom.plot_data(image,\n",
+    "                   ax=ax,\n",
+    "                   colorbar={'cax': cax, 'label': plot_unit, 'orientation': 'horizontal'},\n",
+    "                   origin='upper',\n",
+    "                   vmin=vmin,\n",
+    "                   vmax=vmax)\n",
+    "    ax.set_title(f'After CS correction',fontsize=12,fontweight='bold');\n",
+    "\n",
+    "    plt.suptitle(f'{karabo_id} | {prop_str}, r{run} | Single frame',fontsize=14,fontweight='bold',y=.72);"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3.8.11 ('.cal3_venv')",
+   "display_name": "cal_venv",
    "language": "python",
-   "name": "python3"
+   "name": "cal_venv"
   },
   "language_info": {
    "codemirror_mode": {
diff --git a/notebooks/ePix100/ePix100_retrieve_constants_precorrection.ipynb b/notebooks/ePix100/ePix100_retrieve_constants_precorrection.ipynb
deleted file mode 100644
index cf506f0e4a1bf79d355cf7e241871730f01bc4b4..0000000000000000000000000000000000000000
--- a/notebooks/ePix100/ePix100_retrieve_constants_precorrection.ipynb
+++ /dev/null
@@ -1,214 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# ePix100 retrieve constants precorrection\n",
-    "\n",
-    "Author: European XFEL Detector Group, Version: 1.0\n",
-    "\n",
-    "The following notebook provides constants for the selected ePix100 modules before executing correction on the selected sequence files."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "in_folder = \"/gpfs/exfel/exp/CALLAB/202031/p900113/raw\"  # input folder, required\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/remove/epix_correct\"  # output folder, required\n",
-    "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
-    "sequences = [-1]  # sequences to correct, set to -1 for all, range allowed\n",
-    "run = 9988  # which run to read data from, required\n",
-    "\n",
-    "# Parameters for accessing the raw data.\n",
-    "karabo_id = \"MID_EXP_EPIX-1\"  # Detector Karabo_ID\n",
-    "karabo_da = \"EPIX01\"  # data aggregators\n",
-    "receiver_template = \"RECEIVER\"  # detector receiver template for accessing raw data files\n",
-    "instrument_source_template = '{}/DET/{}:daqOutput'  # instrument detector data source in h5files\n",
-    "\n",
-    "# Parameters for the calibration database.\n",
-    "creation_time = \"\"  # The timestamp to use with Calibration DB. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\"  # calibration DB interface to use\n",
-    "cal_db_timeout = 300000  # timeout on CalibrationDB requests\n",
-    "\n",
-    "# Conditions for retrieving calibration constants.\n",
-    "bias_voltage = 200  # bias voltage\n",
-    "in_vacuum = False  # detector operated in vacuum\n",
-    "fix_temperature = 290  # fixed temperature value in Kelvin. Default value -1 to use the value from files.\n",
-    "integration_time = -1  # Detector integration time, Default value -1 to use the value from the slow data.\n",
-    "gain_photon_energy = 9.0  # Photon energy used for gain calibration\n",
-    "\n",
-    "# Flags to select type of applied corrections.\n",
-    "relative_gain = True  # Apply relative gain correction."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from logging import warning\n",
-    "\n",
-    "import numpy as np\n",
-    "from extra_data import RunDirectory\n",
-    "from pathlib import Path\n",
-    "\n",
-    "import cal_tools.restful_config as rest_cfg\n",
-    "from cal_tools.calcat_interface import EPIX100_CalibrationData\n",
-    "from cal_tools.epix100 import epix100lib\n",
-    "from cal_tools.tools import (\n",
-    "    calcat_creation_time,\n",
-    "    CalibrationMetadata,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "in_folder = Path(in_folder)\n",
-    "out_folder = Path(out_folder)\n",
-    "\n",
-    "out_folder.mkdir(parents=True, exist_ok=True)\n",
-    "\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# NOTE: this notebook will not overwrite calibration metadata file,\n",
-    "# if it already contains details about which constants to use.\n",
-    "retrieved_constants = metadata.setdefault(\"retrieved-constants\", {})"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
-    "print(f\"Using {creation_time.isoformat()} as creation time\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Read control data.\n",
-    "run_dc = RunDirectory(in_folder / f\"r{run:04d}\")\n",
-    "\n",
-    "ctrl_data = epix100lib.epix100Ctrl(\n",
-    "    run_dc=run_dc,\n",
-    "    instrument_src=f\"{karabo_id}/DET/{receiver_template}:daqOutput\",\n",
-    "    ctrl_src=f\"{karabo_id}/DET/CONTROL\",\n",
-    "    )\n",
-    "\n",
-    "if integration_time < 0:\n",
-    "    integration_time = ctrl_data.get_integration_time()\n",
-    "    integration_time_str_add = \"\"\n",
-    "else:\n",
-    "    integration_time_str_add = \"(manual input)\"\n",
-    "\n",
-    "if fix_temperature < 0:\n",
-    "    temperature = ctrl_data.get_temprature()\n",
-    "    temperature_k = temperature + 273.15\n",
-    "    temp_str_add = \"\"\n",
-    "else:\n",
-    "    temperature_k = fix_temperature\n",
-    "    temperature = fix_temperature - 273.15\n",
-    "    temp_str_add = \"(manual input)\"\n",
-    "\n",
-    "\n",
-    "print(f\"Bias voltage is {bias_voltage} V\")\n",
-    "print(f\"Detector integration time is set to {integration_time} \\u03BCs {integration_time_str_add}\")\n",
-    "print(f\"Mean temperature: {temperature:0.2f}°C / {temperature_k:0.2f} K {temp_str_add}\")\n",
-    "print(f\"Operated in vacuum: {in_vacuum}\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "epix_cal = EPIX100_CalibrationData(\n",
-    "    detector_name=karabo_id,\n",
-    "    sensor_bias_voltage=bias_voltage,\n",
-    "    integration_time=integration_time,\n",
-    "    sensor_temperature=temperature_k,\n",
-    "    in_vacuum=in_vacuum,\n",
-    "    source_energy=gain_photon_energy,\n",
-    "    event_at=creation_time,\n",
-    "    client=rest_cfg.calibration_client(),\n",
-    "    )\n",
-    "\n",
-    "mdata_dict = {\"constants\": dict()}\n",
-    "\n",
-    "constant_names = [\"OffsetEPix100\", \"NoiseEPix100\"]\n",
-    "if relative_gain:\n",
-    "    constant_names += [\"RelativeGainEPix100\"]\n",
-    "\n",
-    "# Retrieve metadata for all epix100 constants.\n",
-    "\n",
-    "epix_metadata = epix_cal.metadata(constant_names)[karabo_da]\n",
-    "\n",
-    "# Validate the constants availability and raise/warn correspondingly.\n",
-    "missing_dark_constants = {\"OffsetEPix100\", \"NoiseEPix100\"} - set(epix_metadata)\n",
-    "if missing_dark_constants:\n",
-    "    raise ValueError(\n",
-    "        f\"Dark constants {missing_dark_constants} are not available to correct {karabo_da}.\")\n",
-    "\n",
-    "if relative_gain and \"RelativeGainEPix100\" not in epix_metadata.keys():\n",
-    "    warning(\"RelativeGainEPix100 is not found in CALCAT.\")\n",
-    "\n",
-    "for cname, ccv_metadata in epix_metadata.items():\n",
-    "    mdata_dict[\"constants\"][cname] = {\n",
-    "        \"path\": str(epix_cal.caldb_root / ccv_metadata[\"path\"]),\n",
-    "        \"dataset\": ccv_metadata[\"dataset\"],\n",
-    "        \"creation-time\": ccv_metadata[\"begin_validity_at\"],\n",
-    "        \"ccv_id\": ccv_metadata[\"ccv_id\"],\n",
-    "    }\n",
-    "    print(f\"Retrieved {cname} with creation-time: {ccv_metadata['begin_validity_at']}\")\n",
-    "\n",
-    "mdata_dict[\"physical-name\"] = ccv_metadata[\"physical_name\"]\n",
-    "retrieved_constants[karabo_da] = mdata_dict\n",
-    "metadata.save()\n",
-    "print(f\"Stored retrieved constants in {metadata.filename}\")"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.8.11 ('.cal4_venv')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.11"
-  },
-  "orig_nbformat": 4,
-  "vscode": {
-   "interpreter": {
-    "hash": "ccde353e8822f411c1c49844e1cbe3edf63293a69efd975d1b44f5e852832668"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/ePix10K/Characterize_Darks_ePix10K_NBC.ipynb b/notebooks/ePix10K/Characterize_Darks_ePix10K_NBC.ipynb
index f8a341d476d779759aedd45fc31809ea6c4b026a..6183a9fe73c096bab60ba6d7dc96623bd3d7bcd6 100644
--- a/notebooks/ePix10K/Characterize_Darks_ePix10K_NBC.ipynb
+++ b/notebooks/ePix10K/Characterize_Darks_ePix10K_NBC.ipynb
@@ -35,7 +35,7 @@
     "h5path_cntrl = '/CONTROL/{}/DET'  # path to control data\n",
     "\n",
     "use_dir_creation_date = True\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "db_output = False # Output constants to the calibration database\n",
     "local_output = True # output constants locally\n",
diff --git a/notebooks/ePix10K/Correction_ePix10K_NBC.ipynb b/notebooks/ePix10K/Correction_ePix10K_NBC.ipynb
index a85e3ae3afca1bb4193deaa4178c8612a001dedf..4a3f8f9330e9cca249379c4a59c4cc48a1a84d6f 100644
--- a/notebooks/ePix10K/Correction_ePix10K_NBC.ipynb
+++ b/notebooks/ePix10K/Correction_ePix10K_NBC.ipynb
@@ -37,7 +37,7 @@
     "h5path_cntrl = '/CONTROL/{}/DET'  # path to control data\n",
     "\n",
     "use_dir_creation_date = True # date constants injected before directory creation time\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015#8025\" # calibration DB interface to use\n",
     "cal_db_timeout = 30000000 # timeout on caldb requests\n",
     "\n",
     "cpuCores = 4 # Specifies the number of running cpu cores\n",
diff --git a/notebooks/generic/PlotFromCalDB_NBC.ipynb b/notebooks/generic/PlotFromCalDB_NBC.ipynb
index 666b2d882c856ee99255fecb71392b805dd67f00..d611aeaceceeca90dc00db1f21d45dde8e0a4b14 100644
--- a/notebooks/generic/PlotFromCalDB_NBC.ipynb
+++ b/notebooks/generic/PlotFromCalDB_NBC.ipynb
@@ -47,7 +47,7 @@
     "photon_energy = 9.2 # Photon energy of the beam\n",
     "out_folder = \"/gpfs/exfel/data/scratch/karnem/test_bla4/\" # output folder\n",
     "use_existing = \"\" # If not empty, constants stored in given folder will be used\n",
-    "cal_db_interface = \"tcp://max-exfl016:8016\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8016\" # the database interface to use\n",
     "cal_db_timeout = 180000 # timeout on caldb requests\",\n",
     "plot_range = 3 # range for plotting in units of median absolute deviations\n",
     "spShape = [256, 64] # Shape of superpixel\n",
diff --git a/notebooks/generic/overallmodules_Darks_Summary_NBC.ipynb b/notebooks/generic/overallmodules_Darks_Summary_NBC.ipynb
index 0b5f1c966a23d46ac014612c967240be4e314c05..bb68d84433a6e013908334397faa75f1600ac49f 100644
--- a/notebooks/generic/overallmodules_Darks_Summary_NBC.ipynb
+++ b/notebooks/generic/overallmodules_Darks_Summary_NBC.ipynb
@@ -30,16 +30,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import copy\n",
-    "import os\n",
     "import warnings\n",
-    "from collections import OrderedDict\n",
     "from pathlib import Path\n",
     "\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
-    "import glob\n",
-    "\n",
     "import h5py\n",
     "import matplotlib\n",
     "import numpy as np\n",
@@ -54,12 +49,24 @@
     "%matplotlib inline\n",
     "import extra_geom\n",
     "import tabulate\n",
+    "from cal_tools import step_timing\n",
     "from cal_tools.ana_tools import get_range\n",
+    "from cal_tools.enums import BadPixels\n",
     "from cal_tools.plotting import show_processed_modules\n",
     "from cal_tools.tools import CalibrationMetadata, module_index_to_qm\n",
     "from XFELDetAna.plotting.simpleplot import simplePlot"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def bp_entry(bp):\n",
+    "    return [f\"{bp.name:<30s}\", f\"{bp.value:032b}\", f\"{int(bp.value)}\"]"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -79,21 +86,34 @@
     "    # This list needs to be in that order as later Adaptive or fixed gain is\n",
     "    # decided based on the condition for the Offset constant.\n",
     "    expected_constants = ['Offset', 'Noise', 'ThresholdsDark', 'BadPixelsDark']\n",
+    "\n",
+    "    table = []\n",
+    "    badpixels = [\n",
+    "        BadPixels.OFFSET_OUT_OF_THRESHOLD,\n",
+    "        BadPixels.NOISE_OUT_OF_THRESHOLD,\n",
+    "        BadPixels.OFFSET_NOISE_EVAL_ERROR,\n",
+    "        BadPixels.GAIN_THRESHOLDING_ERROR,\n",
+    "    ]\n",
+    "    for bp in badpixels:\n",
+    "        table.append(bp_entry(bp))\n",
+    "\n",
     "    display(Markdown(\"\"\"\n",
-    "    \n",
     "# Summary of AGIPD dark characterization #\n",
     "\n",
-    "The following report shows a set of dark images taken with the AGIPD detector to deduce detector offsets, noise, bad-pixel maps and thresholding. All four types of constants are evaluated per-pixel and per-memory cell.\n",
-    "\n",
+    "The following report shows a set of dark images taken with the AGIPD detector to deduce detector offsets, \n",
+    "noise, bad-pixel maps and thresholding. All four types of constants are evaluated per-pixel and per-memory cell.\n",
     "\n",
-    "**The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$). \n",
+    "**The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel \n",
+    "($x,y$) and memory cell ($c$). \n",
     "\n",
     "**The noise** $N$ is the standard deviation $\\sigma$ of the dark signal.\n",
     "\n",
     "$$ O_{x,y,c} = M(Ds)_{t} ,\\,\\,\\,\\,\\,\\, N_{x,y,c} = \\sigma(Ds)_{t}$$\n",
     "\n",
-    "**The bad pixel** mask is encoded as a bit mask.\n",
+    "**The bad pixel** mask is encoded as a bit mask.\"\"\"))\n",
     "\n",
+    "    display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=[\"Name\", \"bit value\", \"integer value\"])))\n",
+    "    display(Markdown(\"\"\"\n",
     "**\"OFFSET_OUT_OF_THRESHOLD\":**\n",
     "\n",
     "Offset outside of bounds:\n",
@@ -120,7 +140,7 @@
     "\n",
     "Values: $\\mathrm{thresholds\\_offset\\_sigma}$, $\\mathrm{thresholds\\_offset\\_hard}$, $\\mathrm{thresholds\\_noise\\_sigma}$, $\\mathrm{thresholds\\_noise\\_hard}$ are given as parameters.\n",
     "\n",
-    "\"**\\\"GAIN_THRESHOLDING_ERROR\\\":**\n",
+    "**\"GAIN_THRESHOLDING_ERROR\":**\n",
     "\n",
     "Bad gain separated pixels with sigma separation less than gain_separation_sigma_threshold\n",
     "\n",
@@ -128,11 +148,20 @@
     "$$ Bad\\_separation = sigma\\_separation < \\mathrm{gain\\_separation\\_sigma\\_threshold} $$\n",
     "\n",
     "\"\"\"))\n",
+    "\n",
     "    \n",
     "elif \"LPD\" in karabo_id:\n",
     "    dinstance = \"LPD1M1\"\n",
     "    nmods = 16\n",
     "    expected_constants = ['Offset', 'Noise', 'BadPixelsDark']\n",
+    "    table = []\n",
+    "    badpixels = [\n",
+    "        BadPixels.OFFSET_OUT_OF_THRESHOLD,\n",
+    "        BadPixels.NOISE_OUT_OF_THRESHOLD,\n",
+    "        BadPixels.OFFSET_NOISE_EVAL_ERROR,\n",
+    "    ]\n",
+    "    for bp in badpixels:\n",
+    "        table.append(bp_entry(bp))\n",
     "    display(Markdown(\"\"\"\n",
     "    \n",
     "# Summary of LPD dark characterization #\n",
@@ -145,7 +174,9 @@
     "\n",
     "$$ O_{x,y,c} = M(Ds)_{t} ,\\,\\,\\,\\,\\,\\, N_{x,y,c} = \\sigma(Ds)_{t}$$\n",
     "\n",
-    "**The bad pixel** mask is encoded as a bit mask.\n",
+    "**The bad pixel** mask is encoded as a bit mask.\"\"\"))\n",
+    "    display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=[\"Name\", \"bit value\", \"integer value\"])))\n",
+    "    display(Markdown(\"\"\"\n",
     "\n",
     "**\"OFFSET_OUT_OF_THRESHOLD\":**\n",
     "\n",
@@ -190,6 +221,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer = step_timing.StepTimer()\n",
     "out_folder = Path(out_folder)\n",
     "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
     "mod_mapping = metadata.setdefault(\"modules-mapping\", {})\n",
@@ -241,6 +273,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "# Get shape, dtype, and number of files for each constant.\n",
     "# Also build lists of the files involved, to be loaded in parallel in a later cell.\n",
     "const_shape_and_dtype = {}\n",
@@ -288,7 +321,8 @@
     "prev_const = {\n",
     "    cname: psh.alloc((nmods_found,) + module_const_shape, dtype=dt, fill=0)\n",
     "    for cname, (module_const_shape, dt) in const_shape_and_dtype.items()\n",
-    "}"
+    "}\n",
+    "step_timer.done_step(\"Preparing arrays for old and new constants.\")"
    ]
   },
   {
@@ -297,6 +331,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "# Load the constant data in parallel\n",
     "found_module_nums = sorted(found_module_nums)\n",
     "mod_names = [module_index_to_qm(n) for n in found_module_nums]\n",
@@ -319,7 +354,8 @@
     "        f[h5path]['data'].read_direct(prev_const[cname][mod_ix])\n",
     "\n",
     "psh.map(load_piece_prev, pieces_to_load_prev)\n",
-    "print(f\"Loaded previous constant data from {len(pieces_to_load_prev)} files\")"
+    "print(f\"Loaded previous constant data from {len(pieces_to_load_prev)} files\")\n",
+    "step_timer.done_step(\"Loading constants data.\")"
    ]
   },
   {
@@ -433,6 +469,8 @@
    },
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
     "gainstages = 1\n",
     "\n",
     "for const_name, const in constants.items():\n",
@@ -483,7 +521,8 @@
     "\n",
     "        plot_const_and_delta(stacked_const, stacked_delta, const_name, glabel)\n",
     "\n",
-    "        plt.show()"
+    "        plt.show()\n",
+    "step_timer.done_step(\"Plotting constants and relative differences.\")"
    ]
   },
   {
@@ -495,6 +534,7 @@
    "outputs": [],
    "source": [
     "# Loop over modules and constants\n",
+    "step_timer.start()\n",
     "for const_name, const in constants.items():\n",
     "    if const_name == 'BadPixelsDark':\n",
     "        continue  # Displayed separately below\n",
@@ -579,7 +619,8 @@
     "                            legend='outside-top-ncol6-frame', legend_size='18%',\n",
     "                            legend_pad=0.00)\n",
     "\n",
-    "        plt.show()"
+    "        plt.show()\n",
+    "step_timer.done_step(\"Plotting summary across modules.\")"
    ]
   },
   {
@@ -589,6 +630,7 @@
    "outputs": [],
    "source": [
     "if 'BadPixelsDark' in constants:\n",
+    "    step_timer.start()\n",
     "    display(Markdown(f'### Summary across Modules - BadPixelsDark'))\n",
     "\n",
     "    bad_px_dark = constants['BadPixelsDark']\n",
@@ -626,7 +668,8 @@
     "                            legend='outside-top-ncol6-frame', legend_size='18%',\n",
     "                            legend_pad=0.00)\n",
     "\n",
-    "        plt.show()"
+    "        plt.show()\n",
+    "    step_timer.done_step(\"Summary across modules for BadPixels.\")"
    ]
   },
   {
@@ -657,6 +700,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "head = ['Module', 'High gain', 'Medium gain', 'Low gain']\n",
     "head_th = ['Module', 'HG_MG threshold', 'MG_LG threshold']\n",
     "for const_name, const in constants.items():\n",
@@ -702,7 +746,8 @@
     "    display(Markdown(label))\n",
     "    header = head_th if const_name == 'ThresholdsDark' else head\n",
     "    md = display(Latex(tabulate.tabulate(\n",
-    "        table, tablefmt='latex', headers=header)))"
+    "        table, tablefmt='latex', headers=header)))\n",
+    "step_timer.done_step(\"Summary tables across modules.\")"
    ]
   },
   {
@@ -712,7 +757,8 @@
    "outputs": [],
    "source": [
     "# Bad pixels summary table\n",
-    "if 'BadPixelsDark' in constants:    \n",
+    "if 'BadPixelsDark' in constants:\n",
+    "    step_timer.start()\n",
     "    bad_px_dark = constants['BadPixelsDark']\n",
     "\n",
     "    table = []\n",
@@ -736,7 +782,8 @@
     "\n",
     "    display(Markdown(label))\n",
     "    md = display(Latex(tabulate.tabulate(\n",
-    "        table, tablefmt='latex', headers=head)))"
+    "        table, tablefmt='latex', headers=head)))\n",
+    "    step_timer.done_step(\"Summary table across modules for BadPixels.\")"
    ]
   }
  ],
diff --git a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
index 1f01b0aa031fcfff4644f392bdb18ce9f23fe246..f6ef23f64136329b405b7a4cdc84240ba3631c3b 100644
--- a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
+++ b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
@@ -39,7 +39,7 @@
     "\n",
     "# Database access parameters.\n",
     "use_dir_creation_date = True  # use dir creation date as data production reference date\n",
-    "cal_db_interface = \"tcp://max-exfl016:8021\"  # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8021\"  # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "db_output = False # if True, the notebook sends dark constants to the calibration database\n",
     "local_output = True # if True, the notebook saves dark constants locally\n",
diff --git a/notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb
index 637f1766cff1562018d53134a134a18b383e2016..1e63597449218f0fce147598239b4a5cda1a391a 100644
--- a/notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb
+++ b/notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb
@@ -52,7 +52,7 @@
     "db_output = False # if True, the notebook injects dark constants into the calibration database\n",
     "local_output = True # if True, the notebook saves dark constants locally\n",
     "\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015\" # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015\" # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.00 e.g. 2019-07-04 11:02:41.00\n",
     "\n",
diff --git a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
index fe3c9eacb650af9d250d8feb5f6565a223ff9b4f..3306fa388ece967b786ac14afacfafb813553755 100644
--- a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
+++ b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
@@ -46,9 +46,10 @@
     "photon_energy = 1.6 # Al fluorescence in keV\n",
     "\n",
     "# Parameters for the calibration database.\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015\" # calibration DB interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl-cal001:8015\" # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "creation_time = \"\"  # The timestamp to use with Calibration DB. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
+    "remove_bias_voltage_if_zero = True  # This flag enables removing bias voltage from the conditions if a 0 value is read from RAW data. This is useful when the corresponding constants for old RAW had no bias voltage because of a mistake in control data. e.g. p002857\n",
     "\n",
     "# Booleans for selecting corrections to apply.\n",
     "only_offset = False # Only, apply offset.\n",
@@ -97,7 +98,6 @@
     "from pathlib import Path\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
-    "import h5py\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
     "import pasha as psh\n",
@@ -107,22 +107,18 @@
     "\n",
     "%matplotlib inline\n",
     "\n",
+    "import cal_tools.restful_config as rest_cfg\n",
     "from XFELDetAna import xfelpyanatools as xana\n",
     "from XFELDetAna import xfelpycaltools as xcal\n",
     "from cal_tools import pnccdlib\n",
     "from cal_tools.files import DataFile\n",
+    "from cal_tools.calcat_interface import CalCatError, PNCCD_CalibrationData\n",
+    "\n",
     "from cal_tools.tools import (\n",
     "    calcat_creation_time,\n",
-    "    get_dir_creation_date,\n",
-    "    get_constant_from_db_and_time,\n",
-    "    get_random_db_interface,\n",
-    "    load_specified_constants,\n",
-    "    CalibrationMetadata,\n",
+    "    write_constants_fragment,\n",
     ")\n",
-    "from cal_tools.step_timing import StepTimer\n",
-    "from cal_tools import h5_copy_except\n",
-    "from iCalibrationDB import Conditions, Constants\n",
-    "from iCalibrationDB.detectors import DetectorTypes"
+    "from cal_tools.step_timing import StepTimer"
    ]
   },
   {
@@ -174,14 +170,8 @@
     "# Output Folder Creation:\n",
     "os.makedirs(out_folder, exist_ok=True)\n",
     "\n",
-    "# NOTE: this notebook shouldn't overwrite calibration metadata file.\n",
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# Constant paths are saved under retrieved-constants in calibration_metadata.yml\n",
-    "const_yaml = metadata.get(\"retrieved-constants\", {})\n",
-    "\n",
     "# extract control data\n",
     "step_timer.start()\n",
-    "\n",
     "ctrl_data = pnccdlib.PnccdCtrl(run_dc, karabo_id)\n",
     "if bias_voltage == 0.:\n",
     "    bias_voltage = ctrl_data.get_bias_voltage()\n",
@@ -303,10 +293,11 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "As a first step, dark constants have to be retrieved from the calibration database"
+    "As a first step, calibration constants have to be retrieved from the calibration database"
    ]
   },
   {
@@ -318,49 +309,64 @@
     "display(Markdown(\"### Constants retrieval\"))\n",
     "step_timer.start()\n",
     "\n",
-    "conditions_dict = {\n",
-    "    \"bias_voltage\": bias_voltage,\n",
-    "    \"integration_time\": integration_time,\n",
-    "    \"gain_setting\": gain,\n",
-    "    \"temperature\": fix_temperature_top,\n",
-    "    \"pixels_x\": pixels_x,\n",
-    "    \"pixels_y\": pixels_y,\n",
-    "}\n",
-    "# Dark condition\n",
-    "dark_condition = Conditions.Dark.CCD(**conditions_dict)\n",
-    "# Add photon energy.\n",
-    "conditions_dict.update({\"photon_energy\": photon_energy})\n",
-    "illum_condition = Conditions.Illuminated.CCD(**conditions_dict)\n",
-    "\n",
-    "# A dictionary for initializing constants. {cname: empty constant array}\n",
-    "empty_constants = {\n",
-    "    \"Offset\": np.zeros((pixels_x, pixels_y, 1), dtype=np.float32),\n",
-    "    \"Noise\": np.zeros((pixels_x, pixels_y, 1), dtype=np.float32),\n",
-    "    \"BadPixelsDark\": np.zeros((pixels_x, pixels_y, 1), dtype=np.uint32),\n",
-    "    \"RelativeGain\": np.zeros((pixels_x, pixels_y), dtype=np.float32),\n",
-    "}\n",
-    "\n",
-    "if const_yaml:  #  Used while reproducing corrected data.\n",
-    "    print(f\"Using stored constants in {metadata.filename}\")\n",
-    "    constants, when = load_specified_constants(\n",
-    "        const_yaml[karabo_da][\"constants\"], empty_constants\n",
-    "    )\n",
-    "else:\n",
-    "    constants = dict()\n",
-    "    when = dict()\n",
-    "    for cname, cempty in empty_constants.items():\n",
-    "        # No need for retrieving RelativeGain, if not used for correction.\n",
-    "        if not corr_bools.get(\"relgain\") and cname == \"RelativeGain\":\n",
-    "            continue\n",
-    "        constants[cname], when[cname] = get_constant_from_db_and_time(\n",
-    "            karabo_id,\n",
-    "            karabo_da,\n",
-    "            constant=getattr(Constants.CCD(DetectorTypes.pnCCD), cname)(),\n",
-    "            condition=illum_condition if cname == \"RelativeGain\" else dark_condition,\n",
-    "            empty_constant=cempty,\n",
-    "            cal_db_interface=get_random_db_interface(cal_db_interface),\n",
-    "            creation_time=creation_time,\n",
-    "        )"
+    "# In the case of an older proposal (e.g., proposal 002857),\n",
+    "# it is possible that the bias voltage was 0\n",
+    "# resulting in the absence of bias voltage values in\n",
+    "# the previously injected dark constants. This situation can be\n",
+    "# attributed to a feature that is currently not available in iCalibrationDB.\n",
+    "if bias_voltage == 0 and remove_bias_voltage_if_zero:\n",
+    "    bias_voltage = None\n",
+    "\n",
+    "pnccd_cal = PNCCD_CalibrationData(\n",
+    "    detector_name=karabo_id,\n",
+    "    sensor_bias_voltage=bias_voltage,\n",
+    "    integration_time=integration_time,\n",
+    "    sensor_temperature=fix_temperature_top,\n",
+    "    gain_setting=gain,\n",
+    "    event_at=creation_time,\n",
+    "    source_energy=photon_energy,\n",
+    "    client=rest_cfg.calibration_client(),\n",
+    ")\n",
+    "\n",
+    "pnccd_metadata = pnccd_cal.metadata(calibrations=pnccd_cal.dark_calibrations)\n",
+    "\n",
+    "if relgain:\n",
+    "    try:\n",
+    "        gain_metadata = pnccd_cal.metadata(calibrations=[\"RelativeGainCCD\"])\n",
+    "        for mod, md in gain_metadata.items():\n",
+    "            pnccd_metadata[mod].update(md)\n",
+    "\n",
+    "    except CalCatError as e:  # TODO: fix after getting new exceptions.\n",
+    "        warning(f\"{e} While asking for {pnccd_cal.illuminated_calibrations}\")\n",
+    "        warning(\"RelativeGainEPix100 is not retrieved from the calibration database. \"\n",
+    "                \"Relative gain correction is disabled.\")\n",
+    "        corr_bools['relgain'] = False\n",
+    "\n",
+    "# Display retrieved calibration constants timestamps\n",
+    "pnccd_cal.display_markdown_retrieved_constants(metadata=pnccd_metadata)\n",
+    "\n",
+    "metadata = pnccd_metadata[karabo_da]\n",
+    "\n",
+    "# Validate the constants availability and raise/warn correspondingly. \n",
+    "missing_dark_constants = set(\n",
+    "    c for c in pnccd_cal.dark_calibrations if c not in metadata.keys())\n",
+    "\n",
+    "if missing_dark_constants:\n",
+    "    raise KeyError(\n",
+    "        f\"Dark constants {missing_dark_constants} are not available for correction.\")\n",
+    "\n",
+    "# Record constant details in YAML metadata\n",
+    "write_constants_fragment(\n",
+    "    out_folder=(metadata_folder or out_folder),\n",
+    "    det_metadata=pnccd_metadata,\n",
+    "    caldb_root=pnccd_cal.caldb_root,\n",
+    ")\n",
+    "\n",
+    "# load constants arrays after storing fragment YAML file\n",
+    "# and validating constants availability.\n",
+    "constants = pnccd_cal.ndarray_map(metadata=pnccd_metadata).get(karabo_da, {})\n",
+    "\n",
+    "step_timer.done_step(\"Constants retrieval\")"
    ]
   },
   {
@@ -369,33 +375,32 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "fig = xana.heatmapPlot(constants[\"Offset\"][:,:,0], x_label='Columns', y_label='Rows', lut_label='Offset (ADU)', \n",
+    "fig = xana.heatmapPlot(constants[\"OffsetCCD\"][:,:,0], x_label='Columns', y_label='Rows', lut_label='Offset (ADU)', \n",
     "                       aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmax=16000, \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                       title = 'Dark Offset Map')\n",
     "\n",
-    "fig = xana.heatmapPlot(constants[\"Noise\"][:,:,0], x_label='Columns', y_label='Rows', \n",
+    "fig = xana.heatmapPlot(constants[\"NoiseCCD\"][:,:,0], x_label='Columns', y_label='Rows', \n",
     "                       lut_label='Corrected Noise (ADU)', \n",
     "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x),  \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                       title = 'Dark Noise Map')\n",
     "\n",
-    "fig = xana.heatmapPlot(np.log2(constants[\"BadPixelsDark\"][:,:,0]), x_label='Columns', y_label='Rows', \n",
+    "fig = xana.heatmapPlot(np.log2(constants[\"BadPixelsDarkCCD\"][:,:,0]), x_label='Columns', y_label='Rows', \n",
     "                       lut_label='Bad Pixel Value (ADU)', \n",
     "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                       title = 'Dark Bad Pixels Map')\n",
     "\n",
     "if corr_bools.get('relgain'):\n",
-    "    fig = xana.heatmapPlot(constants[\"RelativeGain\"], figsize=(8, 8), x_label='Columns', y_label='Rows', \n",
+    "    fig = xana.heatmapPlot(constants[\"RelativeGainCCD\"], figsize=(8, 8), x_label='Columns', y_label='Rows', \n",
     "                            lut_label='Relative Gain', \n",
     "                            aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0.8, vmax=1.2, \n",
     "                            panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                            panel_top_low_lim = 0.5, panel_top_high_lim = 1.5, panel_side_low_lim = 0.5, \n",
     "                            panel_side_high_lim = 1.5, \n",
-    "                            title = f'Relative Gain Map for pnCCD (Gain = 1/{int(gain)})')\n",
-    "step_timer.done_step(\"Constants retrieval\")"
+    "                            title = f'Relative Gain Map for pnCCD (Gain = 1/{int(gain)})')"
    ]
   },
   {
@@ -411,13 +416,13 @@
     "                                             commonModeBlockSize,\n",
     "                                             commonModeAxis,\n",
     "                                             parallel=False, dType=np.float32, stride=1,\n",
-    "                                             noiseMap=constants[\"Noise\"].astype(np.float32), minFrac=0.25)\n",
+    "                                             noiseMap=constants[\"NoiseCCD\"].astype(np.float32), minFrac=0.25)\n",
     "\n",
     "if corr_bools.get('pattern_class'):\n",
     "    # Pattern Classifier Calculator:\n",
     "    # Left Hemisphere:\n",
     "    patternClassifierLH = xcal.PatternClassifier([pixels_x, pixels_y//2],\n",
-    "                                                 constants[\"Noise\"][:, :pixels_y//2],\n",
+    "                                                 constants[\"NoiseCCD\"][:, :pixels_y//2],\n",
     "                                                 split_evt_primary_threshold,\n",
     "                                                 split_evt_secondary_threshold,\n",
     "                                                 split_evt_mip_threshold,\n",
@@ -429,7 +434,7 @@
     "\n",
     "    # Right Hemisphere:\n",
     "    patternClassifierRH = xcal.PatternClassifier([pixels_x, pixels_y//2],\n",
-    "                                                 constants[\"Noise\"][:, pixels_y//2:],\n",
+    "                                                 constants[\"NoiseCCD\"][:, pixels_y//2:],\n",
     "                                                 split_evt_primary_threshold,\n",
     "                                                 split_evt_secondary_threshold,\n",
     "                                                 split_evt_mip_threshold,\n",
@@ -442,11 +447,11 @@
     "    patternClassifierLH._imagesPerChunk = 1\n",
     "    patternClassifierRH._imagesPerChunk = 1\n",
     "\n",
-    "    patternClassifierLH._noisemap = constants[\"Noise\"][:, :pixels_x//2]\n",
-    "    patternClassifierRH._noisemap = constants[\"Noise\"][:, pixels_x//2:]\n",
+    "    patternClassifierLH._noisemap = constants[\"NoiseCCD\"][:, :pixels_x//2]\n",
+    "    patternClassifierRH._noisemap = constants[\"NoiseCCD\"][:, pixels_x//2:]\n",
     "    # Setting bad pixels:\n",
-    "    patternClassifierLH.setBadPixelMask(constants[\"BadPixelsDark\"][:, :pixels_x//2] != 0)\n",
-    "    patternClassifierRH.setBadPixelMask(constants[\"BadPixelsDark\"][:, pixels_x//2:] != 0)"
+    "    patternClassifierLH.setBadPixelMask(constants[\"BadPixelsDarkCCD\"][:, :pixels_x//2] != 0)\n",
+    "    patternClassifierRH.setBadPixelMask(constants[\"BadPixelsDarkCCD\"][:, pixels_x//2:] != 0)"
    ]
   },
   {
@@ -586,10 +591,10 @@
     "\n",
     "data_path = \"INSTRUMENT/\"+instrument_src+\"/data/\"\n",
     "\n",
-    "offset = np.squeeze(constants[\"Offset\"])\n",
-    "noise = np.squeeze(constants[\"Noise\"])\n",
-    "bpix = np.squeeze(constants[\"BadPixelsDark\"])\n",
-    "relativegain = constants.get(\"RelativeGain\")"
+    "offset = np.squeeze(constants[\"OffsetCCD\"])\n",
+    "noise = np.squeeze(constants[\"NoiseCCD\"])\n",
+    "bpix = np.squeeze(constants[\"BadPixelsDarkCCD\"])\n",
+    "relativegain = constants.get(\"RelativeGainCCD\")"
    ]
   },
   {
diff --git a/notebooks/pnCCD/pnCCD_retrieve_constants_precorrection.ipynb b/notebooks/pnCCD/pnCCD_retrieve_constants_precorrection.ipynb
deleted file mode 100644
index eeac52fe3e033eb3bfef5a82fc3376ca1e8aecf4..0000000000000000000000000000000000000000
--- a/notebooks/pnCCD/pnCCD_retrieve_constants_precorrection.ipynb
+++ /dev/null
@@ -1,223 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# pnCCD retrieve constants precorrection\n",
-    "\n",
-    "Author: European XFEL Detector Group, Version: 1.0\n",
-    "\n",
-    "The following notebook provides constants for the selected pnCCD modules before executing correction on the selected sequence files."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\"  # input folder\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/remove/pnccd_correct\"  # output folder\n",
-    "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
-    "run = 347  # which run to read data from\n",
-    "sequences = [0]  # sequences to correct, set to -1 for all, range allowed\n",
-    "\n",
-    "karabo_da = 'PNCCD01'  # data aggregators\n",
-    "karabo_id = \"SQS_NQS_PNCCD1MP\"  # detector Karabo_ID\n",
-    "\n",
-    "# Conditions for retrieving calibration constants\n",
-    "fix_temperature_top = 0.  # fix temperature for top sensor in K, set to 0. to use value from slow data.\n",
-    "fix_temperature_bot = 0.  # fix temperature for bottom sensor in K, set to 0. to use value from slow data.\n",
-    "gain = -1  # the detector's gain setting. Set to -1 to use the value from the slow data.\n",
-    "bias_voltage = 0.  # the detector's bias voltage. set to 0. to use value from slow data.\n",
-    "integration_time = 70  # detector's integration time\n",
-    "photon_energy = 1.6  # Al fluorescence in keV\n",
-    "\n",
-    "# Parameters for the calibration database.\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015\"  # calibration DB interface to use\n",
-    "cal_db_timeout = 300000  # timeout on CalibrationDB requests\n",
-    "creation_time = \"\"  # The timestamp to use with Calibration DBe. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
-    "\n",
-    "# Booleans for selecting corrections to apply.\n",
-    "only_offset = False  # Only, apply offset.\n",
-    "relgain = True  # Apply relative gain correction\n",
-    "\n",
-    "# parameters affecting stored output data.\n",
-    "overwrite = True  # keep this as True to not overwrite the output "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import datetime\n",
-    "from pathlib import Path\n",
-    "\n",
-    "from IPython.display import Markdown, display\n",
-    "from extra_data import RunDirectory\n",
-    "\n",
-    "from cal_tools import pnccdlib\n",
-    "from cal_tools.tools import (\n",
-    "    calcat_creation_time,\n",
-    "    get_dir_creation_date,\n",
-    "    get_from_db,\n",
-    "    get_random_db_interface,\n",
-    "    save_constant_metadata,\n",
-    "    CalibrationMetadata,\n",
-    ")\n",
-    "from iCalibrationDB import Conditions, Constants\n",
-    "from iCalibrationDB.detectors import DetectorTypes"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
-    "# NOTE: this notebook will not overwrite calibration metadata file,\n",
-    "# if it already contains details about which constants to use.\n",
-    "retrieved_constants = metadata.setdefault(\"retrieved-constants\", {})\n",
-    "if karabo_da in retrieved_constants:\n",
-    "    print(\n",
-    "        f\"Constant for {karabo_da} already in {metadata.filename}, won't query again.\"\n",
-    "    )  # noqa\n",
-    "    import sys\n",
-    "\n",
-    "    sys.exit(0)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Here the correction booleans dictionary is defined\n",
-    "corr_bools = {}\n",
-    "\n",
-    "corr_bools[\"only_offset\"] = only_offset\n",
-    "\n",
-    "# Apply offset only.\n",
-    "if not only_offset:\n",
-    "    corr_bools[\"relgain\"] = relgain"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(f\"Calibration database interface selected: {cal_db_interface}\")\n",
-    "\n",
-    "# Run's creation time:\n",
-    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
-    "print(f\"Creation time: {creation_time}\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "run_dc = RunDirectory(Path(in_folder) / f\"r{run:04d}\", _use_voview=False)\n",
-    "ctrl_data = pnccdlib.PnccdCtrl(run_dc, karabo_id)\n",
-    "\n",
-    "# extract control data\n",
-    "if bias_voltage == 0.0:\n",
-    "    bias_voltage = ctrl_data.get_bias_voltage()\n",
-    "if gain == -1:\n",
-    "    gain = ctrl_data.get_gain()\n",
-    "if fix_temperature_top == 0:\n",
-    "    fix_temperature_top = ctrl_data.get_fix_temperature_top()\n",
-    "\n",
-    "# Printing the Parameters Read from the Data File:\n",
-    "display(Markdown(\"### Detector Parameters\"))\n",
-    "print(f\"Bias voltage: {bias_voltage:0.1f} V.\")\n",
-    "print(f\"Detector gain: {int(gain)}.\")\n",
-    "print(f\"Detector integration time: {integration_time} ms\")\n",
-    "print(f\"Top pnCCD sensor temperature: {fix_temperature_top:0.2f} K\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "display(Markdown(\"### Constants retrieval\"))\n",
-    "\n",
-    "conditions_dict = {\n",
-    "    \"bias_voltage\": bias_voltage,\n",
-    "    \"integration_time\": integration_time,\n",
-    "    \"gain_setting\": gain,\n",
-    "    \"temperature\": fix_temperature_top,\n",
-    "    \"pixels_x\": 1024,\n",
-    "    \"pixels_y\": 1024,\n",
-    "}\n",
-    "# Dark condition\n",
-    "dark_condition = Conditions.Dark.CCD(**conditions_dict)\n",
-    "# Add photon energy.\n",
-    "conditions_dict.update({\"photon_energy\": photon_energy})\n",
-    "illum_condition = Conditions.Illuminated.CCD(**conditions_dict)\n",
-    "\n",
-    "mdata_dict = dict()\n",
-    "mdata_dict[\"constants\"] = dict()\n",
-    "for cname in [\"Offset\", \"Noise\", \"BadPixelsDark\", \"RelativeGain\"]:\n",
-    "    # No need for retrieving RelativeGain, if not used for correction.\n",
-    "    if not corr_bools.get(\"relgain\") and cname == \"RelativeGain\":\n",
-    "        continue\n",
-    "    _, mdata = get_from_db(\n",
-    "        karabo_id=karabo_id,\n",
-    "        karabo_da=karabo_da,\n",
-    "        constant=getattr(Constants.CCD(DetectorTypes.pnCCD), cname)(),\n",
-    "        condition=illum_condition if cname == \"RelativeGain\" else dark_condition,\n",
-    "        empty_constant=None,\n",
-    "        cal_db_interface=get_random_db_interface(cal_db_interface),\n",
-    "        creation_time=creation_time,\n",
-    "        verbosity=1,\n",
-    "        load_data=False,\n",
-    "    )\n",
-    "    save_constant_metadata(mdata_dict[\"constants\"], mdata, cname)\n",
-    "\n",
-    "mdata_dict[\"physical-detector-unit\"] = mdata.calibration_constant_version.device_name\n",
-    "retrieved_constants[karabo_da] = mdata_dict\n",
-    "metadata.save()\n",
-    "print(f\"Stored retrieved constants in {metadata.filename}\")"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.8.11 ('.cal4_venv')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.11"
-  },
-  "orig_nbformat": 4,
-  "vscode": {
-   "interpreter": {
-    "hash": "ccde353e8822f411c1c49844e1cbe3edf63293a69efd975d1b44f5e852832668"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/reportservice/README.md b/reportservice/README.md
index ccf7f91bab188ec10bffa1638a4fa61b9238aa4c..b0a3d83d211e857e029facaa1fcbbd9d5ae1d732 100644
--- a/reportservice/README.md
+++ b/reportservice/README.md
@@ -34,7 +34,7 @@ The available command line arguments are:
 
 Modes:
 
-*prod* is the production mode working on the max-exfl016 as xcal user for generating the DC report through RTD
+*prod* is the production mode, running on max-exfl-cal001 as the xcal user for generating the DC report through RTD
 and it should generate a very generalized DC report for the available detectors with information useful for most of the detector experts and users.
 
 *local* is the mode used for generating figures locally without uploading the DC report on RTD or pushing figures
diff --git a/reportservice/report_conf.yaml b/reportservice/report_conf.yaml
index aa6a59202e7c7bde890d1bdb816e56fdffe7ad79..9c517865c2232c68b3a8dbfb7d0ba3a760637dad 100644
--- a/reportservice/report_conf.yaml
+++ b/reportservice/report_conf.yaml
@@ -2,7 +2,7 @@ GLOBAL:
     git:
         repo-local: "/gpfs/exfel/data/scratch/xcal/calibration/DetectorCharacterization/"
         figures-remote: "http://git@git.xfel.eu/gitlab/detectors/DetectorCharacterization.git"
-    server-port: "tcp://max-exfl016:5566"
+    server-port: "tcp://max-exfl-cal001:5566"
 
     run-on:
         - Monday 08:30:00 UTC
@@ -69,7 +69,7 @@ SPB:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
     JUNGFRAU:
         det-type:
@@ -138,7 +138,7 @@ SPB:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
 MID:
     AGIPD1M2:
@@ -194,7 +194,7 @@ MID:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
     EPIX:
         det-type:
@@ -236,7 +236,7 @@ MID:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
 
 FXE:
@@ -287,7 +287,7 @@ FXE:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
     JUNGFRAU:
         det-type:
@@ -353,7 +353,7 @@ FXE:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
 DETLAB:
     FASTCCD:
@@ -415,7 +415,7 @@ DETLAB:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
 
 SCS:
@@ -478,7 +478,7 @@ SCS:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
 SQS:
     PNCCD:
@@ -529,7 +529,7 @@ SQS:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
 HED:
     EPIX:
@@ -572,7 +572,7 @@ HED:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
 
     JUNGFRAU:
         det-type:
@@ -637,4 +637,4 @@ HED:
         use-existing: "''"
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
-        cal-db-interface: "tcp://max-exfl016:8015#8025"
+        cal-db-interface: "tcp://max-exfl-cal001:8015#8025"
diff --git a/setup.py b/setup.py
index 3a6d9f43cdf01bfa287f161648a09b82d6c73ea4..567962d615368aac963c7c6111ac23a0a6e7a610 100644
--- a/setup.py
+++ b/setup.py
@@ -63,8 +63,8 @@ install_requires = [
         "docutils==0.17.1",
         "dynaconf==3.1.4",
         "env_cache==0.1",
-        "extra_data==1.12.0",
-        "extra_geom==1.8.0",
+        "extra_data==1.15.1",
+        "extra_geom==1.10.0",
         "gitpython==3.1.0",
         "h5py==3.5.0",
         "iminuit==1.3.8",
@@ -101,14 +101,16 @@ install_requires = [
         "tabulate==0.8.6",
         "traitlets==4.3.3",
         "xarray==2022.3.0",
-        "EXtra-redu==0.0.7",
+        "EXtra-redu==0.0.8",
+        "rich==12.6.0",
+        "httpx==0.23.0",
 ]
 
 if "readthedocs.org" not in sys.executable:
     install_requires += [
-        "iCalibrationDB @ git+ssh://git@git.xfel.eu:10022/detectors/cal_db_interactive.git@2.4.0",  # noqa
+        "iCalibrationDB @ git+ssh://git@git.xfel.eu:10022/detectors/cal_db_interactive.git@2.4.1",  # noqa
         "XFELDetectorAnalysis @ git+ssh://git@git.xfel.eu:10022/karaboDevices/pyDetLib.git@2.7.0",  # noqa
-        "CalParrot @ git+ssh://git@git.xfel.eu:10022/calibration/calparrot.git@0.1",  # noqa
+        "CalParrot @ git+ssh://git@git.xfel.eu:10022/calibration/calparrot.git@0.3",  # noqa
     ]
 
 setup(
@@ -159,6 +161,16 @@ setup(
             "testpath",
             "unittest-xml-reporting==3.0.2",
         ],
+        "automated_test": [
+            "coverage",
+            "nbval",
+            "pytest-asyncio",
+            "pytest-cov",
+            "pytest-subprocess",
+            "pytest>=5.4.0",
+            "testpath",
+            "unittest-xml-reporting==3.0.2",
+        ],
         "dev": [
             "nbqa[toolchain]",
             "pre-commit",
diff --git a/src/cal_tools/agipdlib.py b/src/cal_tools/agipdlib.py
index 88c3413cc623efda893852293865abc2b90db701..1b87470d3e7222a9d0dfcd5fff8ff428dc8940fc 100644
--- a/src/cal_tools/agipdlib.py
+++ b/src/cal_tools/agipdlib.py
@@ -1,64 +1,65 @@
 import os
 import posixpath
 import zlib
+from dataclasses import dataclass, field
 from datetime import datetime
+from logging import warning
 from multiprocessing import Manager
 from multiprocessing.pool import ThreadPool
-from typing import Any, Dict, List, Optional, Tuple
+from typing import List, Optional
 
 import h5py
 import numpy as np
 import sharedmem
 from dateutil import parser
-from extra_data import DataCollection, H5File, by_id, components
-from iCalibrationDB import Conditions, Constants
+from extra_data import DataCollection, H5File, RunDirectory, by_id
 
 from cal_tools import agipdalgs as calgs
 from cal_tools.agipdutils import (
-    assemble_constant_dict,
     baseline_correct_via_noise,
     baseline_correct_via_stripe,
+    cast_array_inplace,
     correct_baseline_via_hist,
     correct_baseline_via_hist_asic,
     make_noisy_adc_mask,
     match_asic_borders,
     melt_snowy_pixels,
-    cast_array_inplace
 )
 from cal_tools.enums import AgipdGainMode, BadPixels, SnowResolution
 from cal_tools.h5_copy_except import h5_copy_except_paths
-from cal_tools.tools import get_from_db
 
 
+@dataclass
 class AgipdCtrl:
-    def __init__(
-        self,
-        run_dc: DataCollection,
-        image_src: str,
-        ctrl_src: str,
-        raise_error: bool = True,
-    ):
-        """ Initialize AgipdCondition class to read
-        all required AGIPD parameters.
-
-        :param run_dc: Run data collection with expected sources
-        to read needed parameters.
-        :param image_src: H5 source for image data.
-        :param ctrl_src: H5 source for control (slow) data.
-        :param raise_error: Boolean to raise errors for missing
-        sources and keys.
-        """
-        self.run_dc = run_dc
-        self.image_src = image_src
-        self.ctrl_src = ctrl_src
-        self.raise_error = raise_error
-
-    def get_num_cells(self) -> Optional[int]:
-        """Read number of memory cells from fast data.
+    """Access AGIPD control parameters from a single run.
+
+    Args:
+        run_dc (DataCollection): Run data collection with expected sources
+            to read needed parameters.
+        image_src (str): H5 source for image data.
+        ctrl_src (str): H5 source for control (slow) data.
+        raise_error (bool): Boolean to raise errors for missing
+            sources and keys.
+    """
+    run_dc: DataCollection
+    image_src: str
+    ctrl_src: str
+    raise_error: bool = False
+
+    def _get_num_cells_ctrl(self) -> Optional[int]:
+        """Get number of cells from CONTROL source."""
+        # Attempt to look for number of cells in slow data
+        ncell_src = (
+            self.ctrl_src, "bunchStructure.nPulses.value")
+        if (
+            ncell_src[0] in self.run_dc.all_sources and
+            ncell_src[1] in self.run_dc.keys_for_source(ncell_src[0])
+        ):
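+            # nPulses may differ between trains; use the run maximum.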
+            return int(self.run_dc[ncell_src].as_single_value(reduce_by='max'))
 
-        :return mem_cells: Number of memory cells
-        return None, if no data available.
-        """
+    def _get_num_cells_instr(self) -> Optional[int]:
+        """Get number of cells from INSTRUMENT source."""
         cells = np.squeeze(
             self.run_dc[
                 self.image_src, "image.cellId"].drop_empty_trains().ndarray()
@@ -70,6 +71,20 @@ class AgipdCtrl:
         dists = [abs(o - maxcell) for o in options]
         return options[np.argmin(dists)]
 
+    def get_num_cells(self) -> Optional[int]:
+        """Read number of memory cells from fast data.
+
+        :return mem_cells: Number of memory cells
+        return None, if no data available.
+        """
+        ncell = self._get_num_cells_ctrl()
+        if ncell is not None:
+            return ncell
+        # The INSTRUMENT-based fallback below is unsuitable for filtered
+        # data: if the DAQ filters frames and the last cell is removed,
+        # it returns a wrong value.
+        return self._get_num_cells_instr()
+
     def _get_acq_rate_ctrl(self) -> Optional[float]:
         """Get acquisition (repetition) rate from CONTROL source."""
         # Attempt to look for acquisition rate in slow data
@@ -83,9 +98,6 @@ class AgipdCtrl:
             # about bucketing the rate for managing meta-data.
             return round(float(self.run_dc[rep_rate_src].as_single_value()), 1)
 
-    def _get_acq_rate_instr(self) -> Optional[float]:
-        """Get acquisition (repetition rate) from INSTRUMENT source."""
-
     def _get_acq_rate_instr(self) -> Optional[float]:
         """Get acquisition (repetition rate) from INSTRUMENT source."""
 
@@ -281,6 +293,171 @@ class AgipdCtrl:
         return 12
 
 
+@dataclass
+class AgipdCtrlRuns:
+    """Get AGIPD control parameters across several runs,
+    e.g. 3 runs for darks.
+
+    Args:
+        raw_folder (str): The RAW folder path.
+        runs (list): The list of runs to read the operating conditions.
+        image_src (str): H5 source for image data.
+        ctrl_src (str): H5 source for control (slow) data.
+        sort_dark_runs_enabled (bool): If True, sort the given dark runs
+            into the canonical high/medium/low gain order.
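+
+    Example (paths and source names are illustrative)::
+
+        ctrl_runs = AgipdCtrlRuns(
+            raw_folder="/gpfs/exfel/exp/SPB/202230/p900000/raw",
+            runs=[9, 10, 11],
+            image_src="SPB_DET_AGIPD1M-1/DET/0CH0:xtdf",
+            ctrl_src="SPB_IRU_AGIPD1M1/MDL/FPGA_COMP",
+        )
+        mem_cells = ctrl_runs.get_memory_cells()
+        acq_rate = ctrl_runs.get_acq_rate()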
+    """
+    raw_folder: str
+    runs: List[int]
+    image_src: str
+    ctrl_src: str
+    sort_dark_runs_enabled: bool = False
+
+    adaptive_gain_modes = [AgipdGainMode.ADAPTIVE_GAIN] * 3
+    fixed_gain_modes = [
+        AgipdGainMode.FIXED_HIGH_GAIN,
+        AgipdGainMode.FIXED_MEDIUM_GAIN,
+        AgipdGainMode.FIXED_LOW_GAIN,
+    ]
+
+    def __post_init__(self):
+        # Create a control-data reader for every run; the getters below
+        # validate that all runs share the same operating conditions.
+        self.run_ctrls = [
+            AgipdCtrl(
+                run_dc=RunDirectory(f"{self.raw_folder}/r{r:04d}"),
+                image_src=self.image_src,
+                ctrl_src=self.ctrl_src,
+                ) for r in self.runs]
+        self.gain_modes = self.get_gain_modes()
+        if self.sort_dark_runs_enabled:
+            self.sort_dark_runs()
+
+    def _validate_same_value(self, name, values):
+        if len(set(values)) != 1:
+            # Should we raise an error and stop processing?
+            warning(
+                f"{name} is not the same for all runs {self.runs}"
+                f" with values of {values}, respectively.")
+
+    def sort_dark_runs(self):
+        """Order dark runs based on run patterns for Adaptive mode
+        or gain modes for Fixed mode.
+        """
+        assert len(self.runs) == 3, f"AGIPD dark runs are expected to be 3. {len(self.runs)} runs are given."  # noqa
+        # Expected patterns:
+        # XRay: 0, DarkHG: 1, DarkMG: 2, DarkLG: 3, PC: 4 and CS: 5.
+        sort_by = None
+        sort_values = []
+        if self.gain_modes == self.adaptive_gain_modes:  # Adaptive gain: sort by patterns
+            # Patterns -> DarkHG: 1, DarkMG: 2, DarkLG: 3
+            if "AGIPD1M" in self.ctrl_src:
+                sort_by = "patternTypeIndex"
+            elif "AGIPD500K" in self.ctrl_src:
+                sort_by = "expTypeIndex"
+
+            for c in self.run_ctrls:
+                sort_values.append(
+                    c.run_dc[self.ctrl_src, sort_by].as_single_value())
+
+        # Check if a mix of adaptive and fixed gain runs.
+        elif any(gm == AgipdGainMode.ADAPTIVE_GAIN for gm in self.gain_modes):
+            raise ValueError(
+                f"Given runs {self.runs} have a mix of ADAPTIVE and "
+                f"FIXED gain modes: {self.gain_modes}.")
+        else:  # Fixed gain: Patterns is X-Ray: 0 for all runs.
+            sort_by = "gainModeIndex"
+            sort_values = [int(gm) for gm in self.gain_modes]
+
+        zipped_lists = zip(sort_values, self.runs, self.run_ctrls)
+
+        # Sort the lists based on the patterns
+        sorted_zipped_lists = sorted(zipped_lists, key=lambda item: item[0])
+        _, sorted_runs, sorted_run_ctrls = zip(*sorted_zipped_lists)
+        if sorted_runs != self.runs:
+            Warning("Given dark runs are unsorted. Runs will be sorted from"
+                    f" {self.runs} with {sort_by}:"
+                    f" {sort_values} to {sorted_runs}.")
+            # Update run_ctrls and runs order
+            self.runs = list(sorted_runs)
+            self.run_ctrls = list(sorted_run_ctrls)
+            self.gain_modes = self.get_gain_modes()
+
+    def fixed_gain_mode(self):
+        """Check if runs are in fixed gain mode.
+
+        Raises:
+            ValueError: Unexpected gain modes for the dark runs
+
+        Returns:
+            bool: runs are in fixed gain mode.
+        """
+        if self.gain_modes == self.adaptive_gain_modes:
+            return False
+        elif self.gain_modes == self.fixed_gain_modes:
+            return True
+        else:
+            raise ValueError(f"Unexpected runs' gain modes: {self.gain_modes}")
+
+    def get_gain_modes(self):
+        """Get runs' gain modes.
+        Returns:
+            list: `AgipdGainMode`s
+        """
+        return [c.get_gain_mode() for c in self.run_ctrls]
+
+    def get_integration_time(self):
+        """
+        Returns:
+            float: Integration time
+        """
+        integration_times = [c.get_integration_time() for c in self.run_ctrls]
+        self._validate_same_value("Integration Time", integration_times)
+        return integration_times[0]
+
+    def get_bias_voltage(self, karabo_id_control: str = None):
+        """
+        Args:
+            karabo_id_control (str):
+                Karabo ID for control device.
+
+        Returns:
+            int: Bias voltage.
+        """
+        bias_voltages = [
+            c.get_bias_voltage(karabo_id_control) for c in self.run_ctrls]
+        self._validate_same_value("Bias Voltage", bias_voltages)
+        return bias_voltages[0]
+
+    def get_memory_cells(self):
+        """
+        Returns:
+            int: number of memory cells.
+        """
+        memory_cells = [c.get_num_cells() for c in self.run_ctrls]
+        self._validate_same_value("Memory cells", memory_cells)
+        return memory_cells[0]
+
+    def get_gain_setting(self, creation_time: Optional[datetime] = None):
+        """
+        Args:
+            creation_time (Optional[datetime], optional):
+                Creation time for the runs.
+
+        Returns:
+            float: Gain Setting
+        """
+        gain_settings = [
+            c.get_gain_setting(creation_time) for c in self.run_ctrls]
+        self._validate_same_value("Gain Setting", gain_settings)
+        return gain_settings[0]
+
+    def get_acq_rate(self):
+        """
+        Returns:
+            float: Acquisition rate
+        """
+        acquisition_rates = [c.get_acq_rate() for c in self.run_ctrls]
+        self._validate_same_value("acquisition_rate", acquisition_rates)
+        return acquisition_rates[0]
+
+
 class CellSelection:
     """Selection of detector memory cells (abstract class)"""
     row_size = 32
@@ -298,17 +475,21 @@ class CellSelection:
         raise NotImplementedError
 
     def get_cells_on_trains(
-        self, train_sel: np.ndarray, nfrm: np.ndarray, cm: int = 0
+        self, train_sel: np.ndarray, nfrm: np.ndarray,
+        cellid: np.ndarray, cm: int = 0
     ) -> np.array:
         """Returns mask of cells selected for processing
 
         :param train_sel: list of a train ids selected for processing
         :param nfrm: the number of frames expected for every train in
             the list `train_sel`
+        :param cellid: array of cell IDs in the same sequence as images to
+            filter
         :param cm: flag indicates the final selection or interim selection
             for common-mode correction
-
-        :return: boolean array with flags indicating images for processing
+        :returns:
+            - boolean array with flags indicating images for processing
+            - integer array with number of selected frames in trains
         """
         raise NotImplementedError
 
@@ -319,17 +500,6 @@ class CellSelection:
         """
         raise NotImplementedError
 
-    @staticmethod
-    def _sel_for_cm(flag, flag_cm, cm):
-        if cm == CellSelection.CM_NONE:
-            return flag
-        elif cm == CellSelection.CM_PRESEL:
-            return flag_cm
-        elif cm == CellSelection.CM_FINSEL:
-            return flag[flag_cm]
-        else:
-            raise ValueError("param 'cm' takes only 0,1,2")
-
 
 class AgipdCorrections:
 
@@ -416,6 +586,7 @@ class AgipdCorrections:
         self.noisy_adc_threshold = 0.25
         self.ff_gain = 1
         self.photon_energy = 9.2
+        self.rounding_threshold = 0.5
 
         # Output parameters
         self.compress_fields = ['gain', 'mask']
@@ -511,48 +682,45 @@ class AgipdCorrections:
         valid_train_ids = im_dc.train_ids
         # Get a count of images in each train
         nimg_in_trains = im_dc[agipd_base, "image.trainId"].data_counts(False)
-        nimg_in_trains = nimg_in_trains.astype(int)
+        nimg_in_trains = nimg_in_trains.astype(np.int64)
 
         # store valid trains in shared memory
         n_valid_trains = len(valid_train_ids)
         data_dict["n_valid_trains"][0] = n_valid_trains
         data_dict["valid_trains"][:n_valid_trains] = valid_train_ids
-        data_dict["nimg_in_trains"][:n_valid_trains] = nimg_in_trains
-
-        if "AGIPD500K" in agipd_base:
-            agipd_comp = components.AGIPD500K(im_dc)
-        else:
-            agipd_comp = components.AGIPD1M(im_dc)
-
-        kw = {
-            "unstack_pulses": False,
-        }
 
         # get selection for the images in this file
         cm = (self.cell_sel.CM_NONE if apply_sel_pulses
               else self.cell_sel.CM_PRESEL)
 
-        img_selected = self.cell_sel.get_cells_on_trains(
-            np.array(valid_train_ids), nimg_in_trains, cm=cm)
+        agipd_src = im_dc[agipd_base]
+
+        cellid = agipd_src["image.cellId"].ndarray()[:, 0]
+
+        img_selected, nimg_in_trains = self.cell_sel.get_cells_on_trains(
+            np.array(valid_train_ids), nimg_in_trains, cellid, cm=cm)
 
-        frm_ix = np.flatnonzero(img_selected)
+        data_dict["nimg_in_trains"][:n_valid_trains] = nimg_in_trains
         data_dict["cm_presel"][0] = (cm == self.cell_sel.CM_PRESEL)
-        n_img = len(frm_ix)
+
+        n_img = img_selected.sum()
+        if img_selected.all():
+            # All frames selected - use slice to skip unnecessary copy
+            frm_ix = np.s_[:]
+        else:
+            frm_ix = np.flatnonzero(img_selected)
 
         # read raw data
-        # [n_modules, n_imgs, 2, x, y]
-        raw_data = agipd_comp.get_array("image.data", **kw)[0]
+        # [n_imgs, 2, x, y]
+        raw_data = agipd_src['image.data'].ndarray()
 
         # store in shmem only selected images
         data_dict['nImg'][0] = n_img
         data_dict['data'][:n_img] = raw_data[frm_ix, 0]
         data_dict['rawgain'][:n_img] = raw_data[frm_ix, 1]
-        data_dict['cellId'][:n_img] = agipd_comp.get_array(
-            "image.cellId", **kw)[0, frm_ix]
-        data_dict['pulseId'][:n_img] = agipd_comp.get_array(
-            "image.pulseId", **kw)[0, frm_ix]
-        data_dict['trainId'][:n_img] = agipd_comp.get_array(
-            "image.trainId", **kw)[0, frm_ix]
+        data_dict['cellId'][:n_img] = cellid[frm_ix]
+        data_dict['pulseId'][:n_img] = agipd_src['image.pulseId'].ndarray()[frm_ix, 0]
+        data_dict['trainId'][:n_img] = agipd_src['image.trainId'].ndarray()[frm_ix, 0]
 
         return n_img
 
@@ -919,13 +1087,19 @@ class AgipdCorrections:
             data_hist_preround, _ = np.histogram(data, bins=self.hist_bins_preround)
 
             data /= self.photon_energy
-            np.round(data, out=data)
 
-            # This could also be done before and its mask inverted for
-            # rounding, but the performance difference is negligible.
-            bidx = data < 0
-            data[bidx] = 0
+            # keep the noise peak symmetrical so that
+            # the expected value of zero remains unshifted
+            bidx = data < -self.rounding_threshold
             msk[bidx] |= BadPixels.VALUE_OUT_OF_RANGE
+
+            np.subtract(data, self.rounding_threshold - 0.5, out=data, where=~bidx)
+            np.round(data, out=data)
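+            # Worked example, assuming rounding_threshold = 0.7: a value of
+            # 0.69 photons shifts to 0.49 and rounds to 0, while 0.71
+            # shifts to 0.51 and rounds to 1.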
+
+            # The noise peak may span an interval greater than one, so some
+            # noise values can still be negative after rounding; they are
+            # clipped to zero but must not be masked.
+            data[data < 0.0] = 0.0
             del bidx
 
             data_hist_postround, _ = np.histogram(data * self.photon_energy,
@@ -1004,11 +1178,13 @@ class AgipdCorrections:
         ntrains = data_dict["n_valid_trains"][0]
         train_ids = data_dict["valid_trains"][:ntrains]
         nimg_in_trains = data_dict["nimg_in_trains"][:ntrains]
+        cellid = data_dict["cellId"][:n_img]
 
         # Initializing can_calibrate array
-        can_calibrate = self.cell_sel.get_cells_on_trains(
-            train_ids, nimg_in_trains, cm=self.cell_sel.CM_FINSEL
+        can_calibrate, nimg_in_trains = self.cell_sel.get_cells_on_trains(
+            train_ids, nimg_in_trains, cellid, cm=self.cell_sel.CM_FINSEL
         )
+        data_dict["nimg_in_trains"][:ntrains] = nimg_in_trains
         if np.all(can_calibrate):
             return n_img
 
@@ -1112,7 +1288,7 @@ class AgipdCorrections:
                                    fletcher32=True)
 
     def init_constants(
-        self, cons_data: dict, when: dict, module_idx: int, variant: dict):
+        self, cons_data: dict, module_idx: int, variant: dict):
         """
         For CI derived gain, a mean multiplication factor of 4.48 compared
         to medium gain is used, as no reliable CI data for all memory cells
@@ -1151,8 +1327,6 @@ class AgipdCorrections:
             rel_low gain = _rel_medium gain * 4.48
 
         :param cons_data: A dictionary for each retrieved constant value.
-        :param when: A dictionary for the creation time
-                     of each retrieved constant.
         :param module_idx: A module_idx index
         :param variant: A dictionary for the variant of each retrieved CCV.
         :return:
@@ -1162,11 +1336,17 @@ class AgipdCorrections:
         # assuming this method runs in parallel.
         calgs_opts = dict(num_threads=os.cpu_count() // len(self.offset))
 
-        calgs.transpose_constant(self.offset[module_idx], cons_data['Offset'], **calgs_opts)
-        calgs.transpose_constant(self.noise[module_idx], cons_data['Noise'], **calgs_opts)
+        calgs.transpose_constant(
+            self.offset[module_idx], cons_data["Offset"], **calgs_opts)
+
+        # If Noise wasn't retrieved, there is nothing to transpose.
+        if "Noise" in cons_data:
+            calgs.transpose_constant(
+                self.noise[module_idx], cons_data["Noise"], **calgs_opts)
+
         if self.gain_mode is AgipdGainMode.ADAPTIVE_GAIN:
             calgs.transpose_constant(self.thresholds[module_idx],
-                                     cons_data['ThresholdsDark'][..., :3],
+                                     cons_data["ThresholdsDark"][..., :3],
                                      **calgs_opts)
 
         if self.corr_bools.get("low_medium_gap"):
@@ -1177,12 +1357,12 @@ class AgipdCorrections:
         bpixels = cons_data["BadPixelsDark"].astype(np.uint32)
 
         if self.corr_bools.get("xray_corr"):
-            if when["BadPixelsFF"]:
+            if "BadPixelsFF" in cons_data:
                 bpixels |= cons_data["BadPixelsFF"].astype(np.uint32)[...,
                                                                       :bpixels.shape[2],  # noqa
                                                                       None]
 
-            if when["SlopesFF"]:  # Checking if constant was retrieved
+            if "SlopesFF" in cons_data:  # Checking if constant was retrieved
 
                 slopesFF = cons_data["SlopesFF"]
                 # This could be used for backward compatibility
@@ -1222,7 +1402,7 @@ class AgipdCorrections:
 
         # add additional bad pixel information
         if any(self.pc_bools):
-            if when["BadPixelsPC"]:
+            if "BadPixelsPC" in cons_data:
                 bppc = np.moveaxis(cons_data["BadPixelsPC"].astype(np.uint32),
                                    0, 2)
                 bpixels |= bppc[..., :bpixels.shape[2], None]
@@ -1230,7 +1410,7 @@ class AgipdCorrections:
             # calculate relative gain from the constants
             rel_gain = np.ones((128, 512, self.max_cells, 3), np.float32)
 
-            if when["SlopesPC"]:
+            if "SlopesPC" in cons_data:
                 slopesPC = cons_data["SlopesPC"].astype(np.float32, copy=False)
 
                 # This will handle some historical data in a different format
@@ -1324,149 +1504,6 @@ class AgipdCorrections:
 
         return
 
-    def initialize_from_yaml(
-        self, karabo_da: str, const_yaml: Dict[str, Any], module_idx: int
-    ) -> Dict[str, Any]:
-        """Initialize calibration constants from a yaml file
-
-        :param karabo_da: a karabo data aggregator
-        :param const_yaml: from the "retrieved-constants" part of a yaml file
-        from pre-notebook, which consists of metadata of either the constant
-        file path or the empty constant shape, and the creation-time of the
-        retrieved constants
-        :param module_idx: Index of module
-        :return when: dict of retrieved constants with their creation-time
-        """
-
-        # string of the device name.
-        cons_data = dict()
-        when = dict()
-        variant = dict()
-        db_module = const_yaml[karabo_da]["physical-detector-unit"]
-        for cname, mdata in const_yaml[karabo_da]["constants"].items():
-            base_key = f"{db_module}/{cname}/0"
-            when[cname] = mdata["creation-time"]
-            if when[cname]:
-                with h5py.File(mdata["file-path"], "r") as cf:
-                    cons_data[cname] = np.copy(cf[f"{base_key}/data"])
-                    # Set variant to 0 if the attribute is missing
-                    # as for old constants.
-                    if "variant" in cf[base_key].attrs.keys():
-                        variant[cname] = cf[base_key].attrs["variant"]
-                    else:
-                        variant[cname] = 0
-            else:
-                # Create empty constant using the list elements
-                cons_data[cname] = getattr(np, mdata["file-path"][0])(mdata["file-path"][1])  # noqa
-
-        self.init_constants(cons_data, when, module_idx, variant)
-
-        return when
-
-    def initialize_from_db(self, karabo_id: str, karabo_da: str,
-                           cal_db_interface: str,
-                           creation_time: datetime,
-                           memory_cells: float, bias_voltage: int,
-                           photon_energy: float, gain_setting: float,
-                           acquisition_rate: float, integration_time: int,
-                           module_idx: int, only_dark: bool = False):
-        """ Initialize calibration constants from the calibration database
-
-        :param karabo_id: karabo identifier
-        :param karabo_da: karabo data aggregator
-        :param cal_db_interface: database interaface port
-        :param creation_time: time for desired calibration constant version
-        :param memory_cells: number of memory cells used for CCV conditions
-        :param bias_voltage: bias voltage used for CCV conditions
-        :param photon_energy: photon energy used for CCV conditions
-        :param gain_setting: gain setting used for CCV conditions
-        :param acquisition_rate: acquistion rate used for CCV conditions
-        :param integration_time: integration time used for CCV conditions
-        :param module_idx: module index to save retrieved CCV in sharedmem
-        :param only_dark: load only dark image derived constants. This
-            implies that a `calfile` is used to load the remaining
-            constants. Useful to reduce DB traffic and interactions
-            for non-frequently changing constants, i.e. such which are
-            not usually updated during a beamtime.
-
-        The `cal_db_interface` parameter in the `dbparms` tuple may be in
-        one of the following notations:
-            * tcp://host:port to directly identify the host and port to
-              connect to
-            * tcp://host:port_low#port_high to specify a port range from
-              which a random port will be picked. E.g. specifying
-
-              tcp://max-exfl016:8015#8025
-
-              will randomly pick an address in the range max-exfl016:8015 and
-              max-exfl016:8025.
-
-        The latter notation allows for load-balancing.
-
-        This routine loads the following constants as given in
-        `iCalibrationDB`:
-
-            Dark Image Derived
-            ------------------
-
-            * Constants.AGIPD.Offset
-            * Constants.AGIPD.Noise
-            * Constants.AGIPD.BadPixelsDark
-            * Constants.AGIPD.ThresholdsDark
-
-            Pulse Capacitor Derived
-            -----------------------
-
-            * Constants.AGIPD.SlopesPC
-
-            Flat-Field Derived
-
-            * Constants.AGIPD.SlopesFF
-
-        """
-
-        const_dict = assemble_constant_dict(
-            self.corr_bools,
-            self.pc_bools,
-            memory_cells,
-            bias_voltage,
-            gain_setting,
-            acquisition_rate,
-            photon_energy,
-            beam_energy=None,
-            only_dark=only_dark,
-            integration_time=integration_time
-        )
-
-        when = {}
-        cons_data = {}
-        variant = {}
-
-        for cname, cval in const_dict.items():
-            condition = getattr(
-                Conditions, cval[2][0]).AGIPD(**cval[2][1])
-            cdata, md = get_from_db(
-                karabo_id=karabo_id,
-                karabo_da=karabo_da,
-                constant=getattr(Constants.AGIPD, cname)(),
-                condition=condition,
-                empty_constant=getattr(np, cval[0])(cval[1]),
-                cal_db_interface=cal_db_interface,
-                creation_time=creation_time,
-                verbosity=0,
-            )
-            cons_data[cname] = cdata
-            variant[cname] = md.calibration_constant_version.variant
-
-            when[cname] = None
-            # Read the CCV begin at if constant was retrieved successfully.
-            if md and md.comm_db_success:
-                when[cname] = md.calibration_constant_version.begin_at
-
-        self.init_constants(cons_data, when, module_idx, variant)
-
-        return when
-
     def allocate_constants(self, modules, constant_shape):
         """
         Allocate memory for correction constants
@@ -1606,6 +1643,7 @@ class CellRange(CellSelection):
         self.flag_cm[:self.max_cells] = self.flag
         self.flag_cm = (self.flag_cm.reshape(-1, self.row_size).any(1)
                         .repeat(self.row_size)[:self.max_cells])
+        self.sel_type = [self.flag, self.flag_cm, self.flag]
 
     def msg(self):
         return (
@@ -1615,10 +1653,24 @@ class CellRange(CellSelection):
         )
 
     def get_cells_on_trains(
-        self, train_sel: np.ndarray, nfrm: np.ndarray, cm: int = 0
+        self, train_sel: np.ndarray, nfrm: np.ndarray,
+        cellid: np.ndarray, cm: int = 0
     ) -> np.array:
-        return np.tile(self._sel_for_cm(self.flag, self.flag_cm, cm),
-                       len(train_sel))
+        if cm < 0 or cm > 2:
+            raise ValueError("param 'cm' takes only 0, 1 or 2")
+
+        flag = self.sel_type[cm]
+        sel = np.zeros(np.sum(nfrm), bool)
+        counts = np.zeros(len(nfrm), int)
+        i0 = 0
+        for i, nfrm_i in enumerate(nfrm):
+            iN = i0 + nfrm_i
+            f = flag[cellid[i0:iN]]
+            sel[i0:iN] = f
+            counts[i] = np.sum(f)
+            i0 = iN
+
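+        # Example (illustrative): with nfrm=[2, 3], cellid=[0, 1, 0, 1, 2]
+        # and a flag selecting only cell 1, this returns
+        # sel=[False, True, False, True, False] and counts=[1, 1].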
+        return sel, counts
 
     def filter_trains(self, train_sel: np.ndarray):
         return train_sel
@@ -1652,14 +1704,11 @@ class LitFrameSelection(CellSelection):
         self.use_super_selection = use_super_selection
 
         if use_super_selection == 'off':
-            self.cm_sel_type = SelType.ROW
-            self.final_sel_type = SelType.CELL
+            self.sel_type = [SelType.CELL, SelType.ROW, SelType.CELL]
         elif use_super_selection == 'cm':
-            self.cm_sel_type = SelType.SUPER_ROW
-            self.final_sel_type = SelType.CELL
+            self.sel_type = [SelType.CELL, SelType.SUPER_ROW, SelType.CELL]
         elif use_super_selection == 'final':
-            self.cm_sel_type = SelType.SUPER_ROW
-            self.final_sel_type = SelType.SUPER_CELL
+            self.sel_type = [SelType.SUPER_CELL, SelType.SUPER_ROW, SelType.SUPER_CELL]
         else:
             raise ValueError("param 'use_super_selection' takes only "
                              "'off', 'cm' or 'final'")
@@ -1695,12 +1744,16 @@ class LitFrameSelection(CellSelection):
         )
 
     def get_cells_on_trains(
-        self, train_sel: np.ndarray, nfrm: np.ndarray, cm: int = 0
+        self, train_sel: np.ndarray, nfrm: np.ndarray,
+        cellid: np.ndarray, cm: int = 0
     ) -> np.array:
+        if cm < 0 or cm > 2:
+            raise ValueError("param 'cm' takes only 0, 1 or 2")
+
+        (sel, counts), = self._sel.litframes_on_trains(
+            train_sel, nfrm, cellid, [self.sel_type[cm]])
 
-        cell_flags, cm_flags = self._sel.litframes_on_trains(
-            train_sel, nfrm, [self.final_sel_type, self.cm_sel_type])
-        return self._sel_for_cm(cell_flags, cm_flags, cm)
+        return sel, counts
 
     def filter_trains(self, train_sel: np.ndarray):
         return self._sel.filter_trains(train_sel, drop_empty=True)
diff --git a/src/cal_tools/agipdutils.py b/src/cal_tools/agipdutils.py
index 6d859edbe2ab7dc7c573e858d41d196c21532664..a7ee52de1c110921dbc652325c6441afed3cc008 100644
--- a/src/cal_tools/agipdutils.py
+++ b/src/cal_tools/agipdutils.py
@@ -6,80 +6,7 @@ from scipy.signal import cwt, find_peaks_cwt, ricker
 from sklearn.mixture import GaussianMixture
 from sklearn.preprocessing import StandardScaler
 
-from cal_tools.enums import AgipdGainMode, BadPixels, SnowResolution
-
-
-def assemble_constant_dict(
-    corr_bools,
-    pc_bools,
-    memory_cells,
-    bias_voltage,
-    gain_setting,
-    acquisition_rate,
-    photon_energy,
-    beam_energy=None,
-    only_dark=False,
-    gain_mode=AgipdGainMode.ADAPTIVE_GAIN,
-    integration_time=None
-):
-    """
-    Assemble a dictionary with the iCalibrationDB constant names and
-    the operating conditions for retrieving the required constants
-    for correction.
-
-    :param corr_bools: (Dict) A dict of booleans for applying
-    specific corrections
-    :param pc_bools: (List) A list of booleans to enable SlopesPC retrieval
-    :param memory_cells: (Int) Number of memory cells
-    :param bias_voltage: (Int) Bias Voltage
-    :param gain_setting: (Float) Gain setting
-    :param acquisition_rate: (Float) Acquisition rate
-    :param photon_energy: (Float) Photon energy
-    :param integration_time: (Float) Integration time
-    :param beam_energy: (Float) Beam Energy
-    :param only_dark: (Bool) Indicating a retrieval for dark constants only from db
-    :param gain_mode: Operation mode of the detector (default to adaptive gain)
-    :return: const_dict: (Dict) An assembled dictionary that can be used
-    to retrieve the required constants
-    """
-
-    darkcond = [
-        "Dark",
-        {
-            "memory_cells": memory_cells,
-            "bias_voltage": bias_voltage,
-            "acquisition_rate": acquisition_rate,
-            "gain_setting": gain_setting,
-            "gain_mode": gain_mode,
-            "integration_time": integration_time,
-            "pixels_x": 512,
-            "pixels_y": 128,
-        },
-    ]
-    const_dict = {
-        "Offset": ["zeros", (128, 512, memory_cells, 3), darkcond],
-        "Noise": ["zeros", (128, 512, memory_cells, 3), darkcond],
-        "ThresholdsDark": ["ones", (128, 512, memory_cells, 5), darkcond],
-        "BadPixelsDark": ["zeros", (128, 512, memory_cells, 3), darkcond],
-    }
-
-    if not (corr_bools.get("only_offset") or only_dark):
-        if any(pc_bools):
-            const_dict["BadPixelsPC"] = ["zeros", (memory_cells, 128, 512), darkcond]
-            const_dict["SlopesPC"] = ["ones", (128, 512, memory_cells, 10), darkcond]
-
-        if corr_bools.get("xray_corr"):
-            # Add illuminated conditions
-            illumcond = [
-                "Illuminated",
-                {"beam_energy": beam_energy, "photon_energy": photon_energy},
-            ]
-            illumcond[1].update(darkcond[1])
-
-            const_dict["BadPixelsFF"] = ["zeros", (128, 512, memory_cells), illumcond]
-            const_dict["SlopesFF"] = ["ones", (128, 512, memory_cells, 2), illumcond]
-
-    return const_dict
+from cal_tools.enums import BadPixels, SnowResolution
 
 
 # contiguous_regions() by Joe Kington on Stackoverflow
diff --git a/src/cal_tools/calcat_interface.py b/src/cal_tools/calcat_interface.py
index a7c8cf158007f8a1e097ee928276dfd92bba22e4..db67afc900f82e1949dd2ef9ce37d76502a4eda4 100644
--- a/src/cal_tools/calcat_interface.py
+++ b/src/cal_tools/calcat_interface.py
@@ -1,9 +1,6 @@
 """Interfaces to calibration constant data."""
-import re
-import socket
 from datetime import date, datetime, time, timezone
 from functools import lru_cache
-from os import getenv
 from pathlib import Path
 from weakref import WeakKeyDictionary
 
@@ -125,7 +122,7 @@ class CalCatApi(metaclass=ClientWrapper):
 
         return {
             "parameters_conditions_attributes": [
-                {"parameter_id": self.parameter_id(k), "value": str(v)}
+                {"parameter_name": k, "value": str(v)}
                 for k, v in condition.items()
             ]
         }
@@ -179,7 +176,7 @@ class CalCatApi(metaclass=ClientWrapper):
                 for pdu in resp_pdus["data"]
             }
         else:
-            raise ValueError(f"{module_naming} is unknown!")
+            raise ValueError(f"{module_naming} is unknown! Expected da, modno, or qm.")
 
 
     @lru_cache()
@@ -367,6 +364,7 @@ class CalibrationData:
 
     calibrations = set()
     default_client = None
+    _default_caldb_root = ...
 
     def __init__(
         self,
@@ -375,6 +373,7 @@ class CalibrationData:
         client=None,
         event_at=None,
         module_naming="da",
+        caldb_root=None,
     ):
         """Initialize a new CalibrationData object.
 
@@ -397,6 +396,8 @@ class CalibrationData:
                     integers in karabo_da.
                 `qm`: QxMx naming convention is used. Virtual names for
                     AGIPD, DSSC, and LPD.
+            caldb_root (str or None): Path to the root directory for
+                calibration constant files; if None, the standard caldb
+                location is detected automatically.
             **condition_params: Operating condition parameters defined
                 on an instance level.
         """
@@ -406,6 +407,10 @@ class CalibrationData:
         self.event_at = event_at
         self.pdu_snapshot_at = event_at
         self.module_naming = module_naming
+        if caldb_root is None:
+            self.caldb_root = self._get_default_caldb_root()
+        else:
+            self.caldb_root = Path(caldb_root)
 
         if client is None:
 
@@ -486,29 +491,19 @@ class CalibrationData:
         )
         return CalibrationData.default_client
 
-    @property
-    def caldb_root(self):
-        """Root directory for calibration constant data.
-
-        Returns:
-            (Path or None) Location of caldb store or
-                None if not available.
-        """
-
-        if not hasattr(CalibrationData, "_caldb_root"):
-            if getenv("SASE"):
-                # ONC
-                CalibrationData._caldb_root = Path("/common/cal/caldb_store")
-            elif re.match(r"^max-(.+)\.desy\.de$", socket.getfqdn()):
-                # Maxwell
-                CalibrationData._caldb_root = Path(
-                    "/gpfs/exfel/d/cal/caldb_store"
-                )
+    @staticmethod
+    def _get_default_caldb_root():
+        if CalibrationData._default_caldb_root is ...:
+            onc_path = Path("/common/cal/caldb_store")
+            maxwell_path = Path("/gpfs/exfel/d/cal/caldb_store")
+            if onc_path.is_dir():
+                CalibrationData._default_caldb_root = onc_path
+            elif maxwell_path.is_dir():
+                CalibrationData._default_caldb_root = maxwell_path
             else:
-                # Probably unavailable
-                CalibrationData._caldb_root = None
+                CalibrationData._default_caldb_root = None
 
-        return CalibrationData._caldb_root
+        return CalibrationData._default_caldb_root
 
     @property
     def client(self):
@@ -524,6 +519,22 @@ class CalibrationData:
             self.detector["id"], self.pdu_snapshot_at, self.module_naming
         )
 
+    @property
+    def mod_to_pdu(self):
+        """Get the physical detector units and create a dictionary
+        mapping each module name to physical name (physical detector unit).
+
+        Returns:
+            DICT: mapping module to physical detector unit name.
+        """
+        return {
+            mod: pdu_md["physical_name"] for mod, pdu_md in self._api.physical_detector_units(  # noqa
+            self.detector["id"],
+            self.pdu_snapshot_at,
+            self.module_naming,
+            ).items()
+        }
+
     @property
     def condition(self):
         return self._build_condition(self.parameters)
@@ -681,6 +692,72 @@ class CalibrationData:
 
         return self.load_constants_from_metadata(metadata)
 
+    def display_markdown_retrieved_constants(
+        self,
+        metadata=None,
+        ccvs_url="https://in.xfel.eu/calibration/calibration_constant_versions/"  # noqa
+    ):
+        """
+        Display markdown tables with reference links for the
+        retrieved constants. Tables are split into groups of at
+        most 4 calibrations (columns) each.
+
+        Args:
+            metadata (dict, optional): Metadata for calibration constants.
+                Defaults to None.
+            ccvs_url (str, optional): URL for calibration constant versions.
+                Defaults to
+                "https://in.xfel.eu/calibration/calibration_constant_versions/".
+        """
+        from IPython.display import Markdown, display
+        from tabulate import tabulate
+
+        if metadata is None:
+            metadata = self.metadata()
+
+        calibrations = set()
+        # Get all calibrations available in the metadata for all modules.
+        for c in list(metadata.values()):
+            calibrations |= c.keys()
+
+        cal_groups = [
+            list(calibrations)[x:x+4] for x in range(0, len(calibrations), 4)]
+
+        # Loop over groups of calibrations.
+        for cal_group in cal_groups:
+            table = [["Modules"] + cal_group]
+
+            # Loop over calibrations and modules to form the next rows.
+            for mod in metadata:
+                mod_consts = []
+
+                for cname in cal_group:
+                    c_mdata = metadata[mod].get(cname)
+                    # A calibration that is available in given metadata.
+                    if c_mdata is not None:
+                        # Use the creation time as a reference
+                        # link to the CCV on CALCAT.
+                        c_time = datetime.fromisoformat(
+                            c_mdata["begin_validity_at"]).strftime(
+                                "%Y-%m-%d %H:%M")
+                        mod_consts.append(
+                            f"[{c_time}]({ccvs_url}/{c_mdata['ccv_id']})")
+                    else:
+                        # Constant is not available for this module.
+                        mod_consts.append("___")
+
+                table.append([mod] + mod_consts)
+
+            display(
+                Markdown(
+                    tabulate(
+                        table,
+                        tablefmt="pipe",
+                        headers="firstrow",
+                        )
+                    )
+                )
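+
+        # The rendered output (illustrative) is one pipe table per group
+        # of up to four calibrations, e.g.:
+        # | Modules | Offset                    | Noise |
+        # |:--------|:--------------------------|:------|
+        # | AGIPD00 | [2023-01-01 10:00](<url>) | ___   |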
+
     def _build_condition(self, parameters):
         cond = dict()
 
@@ -755,9 +832,9 @@ class CalibrationData:
         try:
             creation_date = data.files[0].metadata()["creationDate"]
         except KeyError:
-            from warnings import warning
+            from warnings import warn
 
-            warning(
+            warn(
                 "Last file modification time used as creation date for old "
                 "DAQ file format may be unreliable"
             )
@@ -767,9 +844,9 @@ class CalibrationData:
             )
         else:
             if not data.is_single_run:
-                from warnings import warning
+                from warnings import warn
 
-                warning(
+                warn(
                     "Sample file used to determine creation date for multi "
                     "run data"
                 )
@@ -931,6 +1008,7 @@ class AGIPD_CalibrationData(SplitConditionCalibrationData):
         gain_setting=None,
         gain_mode=None,
         module_naming="da",
+        caldb_root=None,
         integration_time=12,
         source_energy=9.2,
         pixels_x=512,
@@ -942,6 +1020,7 @@ class AGIPD_CalibrationData(SplitConditionCalibrationData):
             client,
             event_at,
             module_naming,
+            caldb_root,
         )
 
         self.sensor_bias_voltage = sensor_bias_voltage
@@ -958,10 +1037,12 @@ class AGIPD_CalibrationData(SplitConditionCalibrationData):
         cond = super()._build_condition(parameters)
 
         # Fix-up some database quirks.
-        if int(cond.get("Gain mode", -1)) == 0:
-            del cond["Gain mode"]
+        if cond.get("Gain mode", None):
+            cond["Gain mode"] = 1
+        else:
+            cond.pop("Gain mode", None)
 
-        if int(cond.get("Integration time", -1)) == 12:
+        if cond.get("Integration time", None) == 12:
             del cond["Integration time"]
 
         return cond
@@ -988,8 +1069,18 @@ class LPD_CalibrationData(SplitConditionCalibrationData):
         "Pixels X",
         "Pixels Y",
         "Feedback capacitor",
+        "Memory cell order",
+    ]
+
+    illuminated_parameters = [
+        "Sensor Bias Voltage",
+        "Memory cells",
+        "Pixels X",
+        "Pixels Y",
+        "Feedback capacitor",
+        "Source Energy",
+        "category"
     ]
-    illuminated_parameters = dark_parameters + ["Source Energy", "category"]
 
     def __init__(
         self,
@@ -1000,11 +1091,13 @@ class LPD_CalibrationData(SplitConditionCalibrationData):
         pixels_x=256,
         pixels_y=256,
         source_energy=9.2,
+        memory_cell_order=None,
         category=1,
         modules=None,
         client=None,
         event_at=None,
         module_naming="da",
+        caldb_root=None,
     ):
         super().__init__(
             detector_name,
@@ -1012,6 +1105,7 @@ class LPD_CalibrationData(SplitConditionCalibrationData):
             client,
             event_at,
             module_naming,
+            caldb_root,
         )
 
         self.sensor_bias_voltage = sensor_bias_voltage
@@ -1019,6 +1113,7 @@ class LPD_CalibrationData(SplitConditionCalibrationData):
         self.pixels_x = pixels_x
         self.pixels_y = pixels_y
         self.feedback_capacitor = feedback_capacitor
+        self.memory_cell_order = memory_cell_order
         self.source_energy = source_energy
         self.category = category
 
@@ -1056,6 +1151,7 @@ class DSSC_CalibrationData(CalibrationData):
         client=None,
         event_at=None,
         module_naming="da",
+        caldb_root=None,
     ):
         super().__init__(
             detector_name,
@@ -1063,6 +1159,7 @@ class DSSC_CalibrationData(CalibrationData):
             client,
             event_at,
             module_naming,
+            caldb_root,
         )
 
         self.sensor_bias_voltage = sensor_bias_voltage
@@ -1093,6 +1190,7 @@ class JUNGFRAU_CalibrationData(CalibrationData):
         "Integration Time",
         "Sensor temperature",
         "Gain Setting",
+        "Gain mode",
     ]
 
     def __init__(
@@ -1110,6 +1208,7 @@ class JUNGFRAU_CalibrationData(CalibrationData):
         client=None,
         event_at=None,
         module_naming="da",
+        caldb_root=None,
     ):
         super().__init__(
             detector_name,
@@ -1117,6 +1216,7 @@ class JUNGFRAU_CalibrationData(CalibrationData):
             client,
             event_at,
             module_naming,
+            caldb_root,
         )
 
         self.sensor_bias_voltage = sensor_bias_voltage
@@ -1177,6 +1277,7 @@ class PNCCD_CalibrationData(SplitConditionCalibrationData):
         client=None,
         event_at=None,
         module_naming="da",
+        caldb_root=None,
     ):
         # Ignore modules for this detector.
         super().__init__(
@@ -1185,6 +1286,7 @@ class PNCCD_CalibrationData(SplitConditionCalibrationData):
             client,
             event_at,
             module_naming,
+            caldb_root,
         )
 
         self.sensor_bias_voltage = sensor_bias_voltage
@@ -1233,6 +1335,7 @@ class EPIX100_CalibrationData(SplitConditionCalibrationData):
         client=None,
         event_at=None,
         module_naming="da",
+        caldb_root=None,
     ):
         # Ignore modules for this detector.
         super().__init__(
@@ -1241,6 +1344,7 @@ class EPIX100_CalibrationData(SplitConditionCalibrationData):
             client,
             event_at,
             module_naming,
+            caldb_root,
         )
 
         self.sensor_bias_voltage = sensor_bias_voltage
@@ -1283,6 +1387,7 @@ class GOTTHARD2_CalibrationData(CalibrationData):
         client=None,
         event_at=None,
         module_naming="da",
+        caldb_root=None,
     ):
         # Ignore modules for this detector.
         super().__init__(
@@ -1291,6 +1396,7 @@ class GOTTHARD2_CalibrationData(CalibrationData):
             client,
             event_at,
             module_naming,
+            caldb_root,
         )
 
         self.sensor_bias_voltage = sensor_bias_voltage
diff --git a/src/cal_tools/dssclib.py b/src/cal_tools/dssclib.py
index 59ca874d2c896224418347577f2a991789fb00b4..a17ec3648524c7449e3e20cae44c4348965bbcc8 100644
--- a/src/cal_tools/dssclib.py
+++ b/src/cal_tools/dssclib.py
@@ -9,6 +9,16 @@ import h5py
 import numpy as np
 
 
+def get_num_cells(fname, h5path):
+    """Get the number of memory cells from the cellId values in a file."""
+    with h5py.File(fname, "r") as f:
+        cells = f[f"{h5path}/cellId"][()]
+        if cells.size == 0:
+            return None
+        return np.max(cells) + 1
+
+
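+# Illustrative usage (file and source names hypothetical):
+#   get_num_cells("RAW-R0123-DSSC00-S00000.h5",
+#                 "INSTRUMENT/SCS_DET_DSSC1M-1/DET/0CH0:xtdf/image")
+# returns np.max(cellId) + 1, or None when no frames were recorded.
+
+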
 def get_pulseid_checksum(fname, h5path, h5path_idx):
     """generates hash value from pulse pattern (veto defined)."""
     with h5py.File(fname, "r") as infile:
diff --git a/src/cal_tools/enums.py b/src/cal_tools/enums.py
index eeebf151d8582255a1f66ce77a3c58c303590a7b..94c8dccea76265290f317aecf4f88eb473b90612 100644
--- a/src/cal_tools/enums.py
+++ b/src/cal_tools/enums.py
@@ -48,7 +48,6 @@ class AgipdGainMode(IntEnum):
 
 class JungfrauSettings(Enum):
     """Jungfrau run gain settings."""
-    # old setting, new setting, new mode
     GAIN_0 = "gain0"
     HIGH_GAIN_0 = "highgain0"
 
diff --git a/src/cal_tools/epix100/epix100lib.py b/src/cal_tools/epix100/epix100lib.py
index c9ba41c9d533934d8ed712384b51b80a0719eab7..6b5e85c52d55843ea023ac8c055bbe2ea669a6af 100644
--- a/src/cal_tools/epix100/epix100lib.py
+++ b/src/cal_tools/epix100/epix100lib.py
@@ -28,15 +28,24 @@ class epix100Ctrl():
 
     def get_temprature(self) -> float:
         """Get temperature value from CONTROL.
-        Temprature is stored in Celsius/100 units.
-        Therefore, we are dividing by 100 and
-        there is an absolute tolerance of 100.
-        atol=100 is a 1 degree variation tolerance.
-
-        Returns:
-            Temperature: temperature in Celsius. 
+        atol is the tolerated variation of the temperature readings.
         """
-        # data.backTemp shape evolved from (n_trains,) to (n_trains, 1)
-        return self.run_dc[
-            self.instrument_src, 'data.backTemp'].as_single_value(
-                reduce_by='mean', atol=100).item() / 100
+        # old receiver device configuration
+        # temperature was stored in:
+        #   source: 'MID_EXP_EPIX-1/DET/RECEIVER:daqOutput'
+        #   key: 'data.backTemp'
+        if 'data.backTemp' in self.run_dc[self.instrument_src]:
+            # using `item()` because data.backTemp shape evolved from (n_trains,) to (n_trains, 1)
+            # atol = 100 because temperature was in C/100
+            return self.run_dc[
+                self.instrument_src, 'data.backTemp'].as_single_value(
+                reduce_by='mean', atol=100).item() / 100
+
+        # new (2023) receiver device configuration
+        # temperature is stored in:
+        #   source: 'MID_EXP_EPIX-1/DET/RECEIVER'
+        #   key: 'slowdata.backTemp.value'
+        else:
+            return self.run_dc[
+                self.instrument_src.split(':daqOutput')[0], 'slowdata.backTemp.value'].as_single_value(
+                reduce_by='mean', atol=1)
diff --git a/src/cal_tools/jfstrixel.py b/src/cal_tools/jfstrixel.py
deleted file mode 100644
index 189b036f6cddf4aaa8ba802e908a999afb6cdfe3..0000000000000000000000000000000000000000
--- a/src/cal_tools/jfstrixel.py
+++ /dev/null
@@ -1,161 +0,0 @@
-
-import numpy as np
-
-
-REGULAR_SHAPE = (512, 1024)
-STRIXEL_SHAPE = (86, 3090)
-
-
-def _normal_indices():
-    """Build normal size pixel indices."""
-    
-    # Normal pixels
-    yin = np.arange(256)
-    xin = np.arange(1024)
-
-    Yin, Xin = np.meshgrid(yin, xin)
-    Yout, Xout = np.meshgrid(yin // 3, (xin // 256 * 774) + (xin % 256) * 3)
-    Xout += (yin % 3).astype(int)[None, :]
-    
-    return Yout, Xout, Yin, Xin
-
-
-def _gap_indices(in_gap_offset=0, out_gap_offset=0,
-                 xout_factor=+1, yout_offset=0):
-    """Build one half of double size gap pixel indices."""
-    
-    igap = np.arange(3)
-    yin = np.arange(256)
-
-    Yin, Xin = np.meshgrid(yin, igap * 256 + 255 + in_gap_offset)
-    Yout, Xout = np.meshgrid(yin // 6 * 2, igap * 774 + 765 + out_gap_offset)
-    Xout += xout_factor * (yin % 6).astype(int)[None, :]
-    Yout += yout_offset
-    
-    return Yout, Xout, Yin, Xin
-
-
-def transformation_indices2d():
-    """Build 2D strixel transformation index arrays."""
-    
-    # Each of this index sets contains four 2D index arrays
-    # Yout, Xout, Yin, Xin from different parts constituting the full
-    # strixel frame. They are each concatenated across these parts into
-    # four final index arrays to be used for translating between the
-    # regular frame and the strixel frame.
-    index_sets = [
-        _normal_indices(),
-        
-        # Left gap
-        _gap_indices(0, 0, +1, 0), _gap_indices(0, 0, +1, 1),
-        
-        # Right gap
-        _gap_indices(1, 11, -1, 0), _gap_indices(1, 11, -1, 1)
-    ]
-    
-    # Yout, Xout, Yin, Xin
-    # Casting to int64 improves indexing performance by up to 30%.
-    return [np.concatenate(index_set).astype(np.int64)
-            for index_set in zip(*index_sets)]
-
-
-def transformation_indices1d():
-    """Build 1D strixel transformation index arrays.
-    
-    Internally this function reduces the 2D index arrays to a single
-    dimension to operate on raveled data arrays. This improves the
-    transformation performance substantially by up to 3x.
-    """
-
-    Yout, Xout, Yin, Xin = transformation_indices2d()
-     
-    regular_pixel_idx = np.arange(np.prod(REGULAR_SHAPE), dtype=np.uint32) \
-        .reshape(REGULAR_SHAPE)
-    strixel_pixel_idx = np.empty(STRIXEL_SHAPE, dtype=np.int64)
-    strixel_pixel_idx.fill(-1)
-    strixel_pixel_idx[Yout, Xout] = regular_pixel_idx[Yin, Xin]
-
-    Iout = np.where(strixel_pixel_idx.ravel() != -1)[0].astype(np.int64)
-    Iin = strixel_pixel_idx.ravel()[Iout].astype(np.int64)
-    
-    return Iout, Iin
-
-
-def double_pixel_indices():
-    """Build index arrays for double-size pixels.
-
-    In raw data, the entire columns 255, 256, 511, 512, 767 and 768
-    are double-size pixels. After strixelation, these end up in columns
-    765-776, 1539-1550 and 2313-2324 on rows 0-85 or 0-83, with a set
-    of four columns with 86 rows followed by a set of 84 and 86 again.
-
-    This function builds the index arrays for double pixels after
-    strixelation.
-
-    Returns: 
-        (ndarray, ndarray) 2D index arrays for double pixel Y and X.
-    """
-
-    Ydouble = []
-    Xdouble = []
-
-    for double_col in [765, 1539, 2313]:
-        for col in range(double_col, double_col+12):
-            for row in range(84 if ((col-double_col) // 4) == 1 else 86):
-                Ydouble.append(row)
-                Xdouble.append(col)
-
-    return np.array(Ydouble), np.array(Xdouble)
-
-
-def to_strixel(data, out=None):
-    """Transform from regular to strixel geometry.
-
-    Only the last two axes are considered for transformation, input data
-    may have any number of additional axes in front.
-    
-    Args:
-        data (array_like): Data in regular geometry.
-        out (array_like, optional): Buffer for transformed output, a new
-            one is allocated if omitted. Must match all non-frame axes
-            of input data and able to hold strixel frame.
-
-    Returns:
-        (array_like) Data in strixel geometry.
-    """
-
-    if out is None:
-        out = np.zeros((*data.shape[:-2], *STRIXEL_SHAPE), dtype=data.dtype)
-
-    out.reshape(*out.shape[:-2], -1)[..., Iout] = data.reshape(
-        *data.shape[:-2], -1)[..., Iin]
-
-    return out
-
-
-def from_strixel(data, out=None):
-    """Transform from strixel to regular geometry.
-
-    Only the last two axes are considered for transformation, input data
-    may have any number of additional axes in front.
-
-    Args:
-        data (array_like): Data in strixel geometry.
-        out (array_like, optional): Buffer for transformed output, a new
-            one is allocated if omitted. Must match all non-frame axes
-            of input data and able to hold regular frame.
-
-    Returns:
-        (array_like): Data in regular geometry.
-    """
-
-    if out is None:
-        out = np.zeros((*data.shape[:-2], *REGULAR_SHAPE), dtype=data.dtype)
-
-    out.reshape(*out.shape[:-2], -1)[..., Iin] = data.reshape(
-        *data.shape[:-2], -1)[..., Iout]
-
-    return out
-
-
-Iout, Iin = transformation_indices1d()
diff --git a/src/cal_tools/jungfrau/__init__.py b/src/cal_tools/jungfrau/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/cal_tools/jungfrau/jfstrixel.py b/src/cal_tools/jungfrau/jfstrixel.py
new file mode 100644
index 0000000000000000000000000000000000000000..b388e5063430295174da65bbdee4a53476073796
--- /dev/null
+++ b/src/cal_tools/jungfrau/jfstrixel.py
@@ -0,0 +1,120 @@
+
+from functools import lru_cache
+from pathlib import Path
+
+import numpy as np
+
+REGULAR_SHAPE = (512, 1024)
+DIR_PATH = Path(__file__).resolve().parent
+
+
+@lru_cache
+def get_strixel_parameters(kind):
+    """Returns a dictionary of strixel parameters stored in .npz file
+    based on the given kind.
+
+    Args:
+        kind (str): Specifies the type of strixel parameters to retrieve.
+            There is two possible values: "A0123" or "A1256"
+    Returns:
+        (dict): Dictionary contating the strixel parameters.
+    """
+    strx_parameters = {}
+
+    if kind == "A0123":
+        file_path = DIR_PATH / "strixel_cols_A0123-lut_mask.npz"
+    elif kind == "A1256":
+        file_path = DIR_PATH / "strixel_rows_A1256-lut_mask.npz"
+    else:
+        raise ValueError(f"Unknown strixel parameter kind: {kind}")
+
+    with np.load(file_path) as data:
+        for k in data.files:
+            strx_parameters[k] = data[k]
+
+    return strx_parameters
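+
+# Illustrative use: the returned dict holds the "frame_shape", "lut" and
+# "mask" arrays used by to_strixel()/from_strixel(), e.g.
+#   strx = get_strixel_parameters("A0123")
+#   strx["frame_shape"]  # (86, 3090) for A0123, as in the old jfstrixel module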
+
+
+def store_double_pixel_indices():
+    """Build index arrays for double-size pixels.
+
+    In raw data for A0123 strixel detector,
+    the entire columns 255, 256, 511, 512, 767 and 768
+    are double-size pixels. After strixelation, these end up in columns
+    765-776, 1539-1550 and 2313-2324 on rows 0-85 or 0-83, with a set
+    of four columns with 86 rows followed by a set of 84 and 86 again.
+
+    This function builds the index arrays for double pixels after
+    strixelation and stores them in the existing A0123 .npz file.
+    """
+
+    ydouble = []
+    xdouble = []
+    file_path = DIR_PATH / "strixel_cols_A0123-lut_mask.npz"
+
+    with np.load(file_path) as data:
+        for double_col in [765, 1539, 2313]:
+            for col in range(double_col, double_col+12):
+                for row in range(84 if ((col-double_col) // 4) == 1 else 86):
+                    ydouble.append(row)
+                    xdouble.append(col)
+        np.savez(file_path, **data, ydouble=ydouble, xdouble=xdouble)
+
+
+def to_strixel(data, out=None, kind="A0123"):
+    """Transform from regular to strixel geometry.
+
+    Only the last two axes are considered for transformation, input data
+    may have any number of additional axes in front.
+
+    Args:
+        data (array_like): Data in regular geometry.
+        out (array_like, optional): Buffer for transformed output, a new
+            one is allocated if omitted. Must match all non-frame axes
+            of the input data and be able to hold the strixel frame.
+        kind (str or None): Strixel layout of the data, "A0123" (default)
+            or "A1256"; if None, the data is returned unchanged.
+
+    Returns:
+        (array_like) Data in strixel geometry.
+    """
+
+    if kind is None:
+        return data
+
+    strx = get_strixel_parameters(kind)
+
+    if out is None:
+        out = np.zeros(
+            (*data.shape[:-2], *strx["frame_shape"]), dtype=data.dtype)
+
+    out.reshape(*out.shape[:-2], -1)[..., ~strx["mask"]] = data.reshape(
+        *data.shape[:-2], -1)[..., strx["lut"]]
+
+    return out
+
+
+def from_strixel(data, out=None, kind="A0123"):
+    """Transform from strixel to regular geometry.
+
+    Only the last two axes are considered for transformation, input data
+    may have any number of additional axes in front.
+
+    Args:
+        data (array_like): Data in strixel geometry.
+        out (array_like, optional): Buffer for transformed output, a new
+            one is allocated if omitted. Must match all non-frame axes
+            of the input data and be able to hold the regular frame.
+        kind (str or None): Strixel layout of the data, "A0123" (default)
+            or "A1256"; if None, the data is returned unchanged.
+
+    Returns:
+        (array_like): Data in regular geometry.
+    """
+
+    if kind is None:
+        return data
+
+    strx = get_strixel_parameters(kind)
+
+    if out is None:
+        out = np.zeros((*data.shape[:-2], *REGULAR_SHAPE), dtype=data.dtype)
+
+    out.reshape(*out.shape[:-2], -1)[..., strx["lut"]] = data.reshape(
+        *data.shape[:-2], -1)[..., strx["mask"]]
+
+    return out
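+
+
+# Round-trip sketch (illustrative):
+#   raw = np.zeros((10, *REGULAR_SHAPE), dtype=np.float32)
+#   strixelated = to_strixel(raw, kind="A0123")
+#   restored = from_strixel(strixelated, kind="A0123")
+#   # restored.shape == raw.shape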
diff --git a/src/cal_tools/jungfraulib.py b/src/cal_tools/jungfrau/jungfraulib.py
similarity index 52%
rename from src/cal_tools/jungfraulib.py
rename to src/cal_tools/jungfrau/jungfraulib.py
index fdfdb0bd571099514de806dbbcc69674e4cdf63e..bb1fcbcfa14439eb965d84cae47785a09725ffb0 100644
--- a/src/cal_tools/jungfraulib.py
+++ b/src/cal_tools/jungfrau/jungfraulib.py
@@ -1,8 +1,10 @@
-from typing import Optional, Tuple
+from logging import warning
+from typing import Tuple
 
 import extra_data
 
-from cal_tools.enums import JungfrauGainMode, JungfrauSettings
+from cal_tools.enums import JungfrauGainMode as JGM
+from cal_tools.enums import JungfrauSettings
 
 
 def _old_settings_to_new(settings: str, index: int) -> str:
@@ -98,22 +100,112 @@ class JungfrauCtrl():
         else:  # JungfrauSettings.GAIN_0
             return 0
 
-    def get_gain_mode(self) -> int:
-        """Get gain mode value. Fixed `1` or Adaptive `1`.
-        - `0` if run_mode = dynamic, forceswitchg1, forceswitchg2, or None.
-        - `1` if run_mode = fixg1 or fixg2.
-        """
+    def get_gain_mode_str(self):
         # Check if run_mode is of an old settings to convert
         # into new mode value.
-        if self.run_mode in [m.value for m in JungfrauGainMode]:
-            mode = self.run_mode
+        if self.run_mode in [m.value for m in JGM]:
+            return self.run_mode
         else:
-            mode = _old_settings_to_new(self.run_mode, 1)
-        
-        if mode in [
-            JungfrauGainMode.FIX_GAIN_1.value,
-            JungfrauGainMode.FIX_GAIN_2.value,
-        ]:
+            return _old_settings_to_new(self.run_mode, 1)
+
+    def get_gain_mode(self) -> int:
+        """Get gain mode value. Fixed `1` or Adaptive `0`.
+        Returns:
+            (int): gain mode parameter condition
+        """
+        gm_enum = self.get_gain_mode_str()
+
+        if gm_enum in [JGM.FIX_GAIN_1.value, JGM.FIX_GAIN_2.value]:
             return 1
-        else:  # DYNAMIC, FORCE_SWITCH_G1, or FORCE_SWITCH_G2
+        else:  # DYNAMIC, FORCE_SWITCH_G1, FORCE_SWITCH_G2 or None
             return 0
+
+
+MODES_ORDER = {
+    JGM.DYNAMIC.value: 0,
+    JGM.FORCE_SWITCH_HG1.value: 1,
+    JGM.FORCE_SWITCH_HG2.value: 2,
+    JGM.FIX_GAIN_1.value: 3,
+    JGM.FIX_GAIN_2.value: 4,
+}
+EXPECTED_RUN_ORDER = [
+    [  # Adaptive operation mode pattern
+        JGM.DYNAMIC.value,
+        JGM.FORCE_SWITCH_HG1.value,
+        JGM.FORCE_SWITCH_HG2.value
+    ],
+    [  # Fixed operation mode pattern
+        JGM.DYNAMIC.value,
+        JGM.FIX_GAIN_1.value,
+        JGM.FIX_GAIN_2.value
+    ],
+]
+
+
+def sort_runs_by_gain(
+    raw_folder,
+    runs,
+    ctrl_src,
+    modes_order=MODES_ORDER,
+    expected_run_order=EXPECTED_RUN_ORDER
+):
+    """Validate the 3 dark runs given for Jungfrau.
+
+    Args:
+        raw_folder (str): RAW folder for the validated dark runs.
+        runs (list): [High run, Medium run, Low run].
+        ctrl_src (str): Control source path for slow data.
+        modes_order (dict): Gain modes order to sort the runs by.
+        expected_run_order (list): Expected dark run order to process.
+    Raises:
+        ValueError: If the given dark runs have unexpected gain modes.
+    """
+    assert len(runs) == 3, "Wrong number of runs. Expected a list of 3 runs."
+
+    run_gm_mapping = dict()
+    for run in runs:
+        ctrl_data = JungfrauCtrl(
+            extra_data.RunDirectory(f"{raw_folder}/r{run:04d}/"),
+            ctrl_src)
+        gm = ctrl_data.get_gain_mode_str()
+        run_gm_mapping[run] = gm
+
+    # 1st legacy case before having run.settings in data.
+    if all(value is None for value in run_gm_mapping.values()):
+        warning("run.settings is not stored in the data "
+                f"to read. Hence assuming gain_mode = {gm}"
+                " for adaptive old data.")
+        return runs
+
+    run_gm_mapping = dict(sorted(
+        run_gm_mapping.items(),
+        key=lambda item: modes_order[item[1]]
+        ))
+    if list(run_gm_mapping.keys()) != runs:
+        warning("Given dark runs are unsorted. "
+                f"Runs will be sorted from {runs} of gain modes "
+                f"{list(run_gm_mapping.values())} to "
+                f"{list(run_gm_mapping.keys())}")
+
+    runs = list(run_gm_mapping.keys())
+    modes = list(run_gm_mapping.values())
+
+    legacy_adaptive = [
+        JGM.DYNAMIC.value,
+        JGM.DYNAMIC.value,
+        JGM.FORCE_SWITCH_HG1.value
+    ]
+
+    # 2nd legacy case with CTRL/MDL bug resulting in wrong run settings.
+    if modes == legacy_adaptive:
+        warning(f"run.settings for medium and low gain runs"
+                f" are wrong {modes[1:]}. This is an expected "
+                f"bug for old raw data. "
+                "Assuming this is an adaptive gain runs.")
+    elif modes not in expected_run_order:
+        raise ValueError("Wrong dark runs are given. "
+                         f"The given three runs are {runs} with "
+                         f"unexpected gain modes {modes}. "
+                         "Please verify the selected 3 dark runs to process.")
+
+    return runs
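+
+
+# Illustrative call (folder and source name hypothetical): if runs 10, 11, 12
+# were taken as dynamic, fixg1 and fixg2 respectively, then
+#   sort_runs_by_gain("/gpfs/exfel/exp/SPB/202301/p001234/raw",
+#                     [10, 12, 11], "SPB_IRDA_JF4M/DET/CONTROL")
+# warns about the unsorted input and returns [10, 11, 12].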
diff --git a/src/cal_tools/jungfrau/strixel_cols_A0123-lut_mask.npz b/src/cal_tools/jungfrau/strixel_cols_A0123-lut_mask.npz
new file mode 100644
index 0000000000000000000000000000000000000000..fcf08e279709b7a8a64c858fdd932d3c7738c172
Binary files /dev/null and b/src/cal_tools/jungfrau/strixel_cols_A0123-lut_mask.npz differ
diff --git a/src/cal_tools/jungfrau/strixel_rows_A1256-lut_mask.npz b/src/cal_tools/jungfrau/strixel_rows_A1256-lut_mask.npz
new file mode 100644
index 0000000000000000000000000000000000000000..be31b53bb7bb0fe2cf1060468e21438793c33dd3
Binary files /dev/null and b/src/cal_tools/jungfrau/strixel_rows_A1256-lut_mask.npz differ
diff --git a/src/cal_tools/lpdlib.py b/src/cal_tools/lpdlib.py
index 872a52964daaa8119b296cff7a7c2f305b09d65c..b61652b33005bf31e1838103c9d5fb59c2f889c9 100644
--- a/src/cal_tools/lpdlib.py
+++ b/src/cal_tools/lpdlib.py
@@ -590,10 +590,10 @@ class LpdCorrections:
             * tcp://host:port_low#port_high to specify a port range from
               which a random port will be picked. E.g. specifying
 
-              tcp://max-exfl016:8015#8025
+              tcp://max-exfl-cal001:8015#8025
 
-              will randomly pick an address in the range max-exfl016:8015 and
-              max-exfl016:8025.
+              will randomly pick an address in the range max-exfl-cal001:8015 and
+              max-exfl-cal001:8025.
 
 
         The latter notation allows for load-balancing.
@@ -772,21 +772,38 @@ class LpdCorrections:
                         flat_fields)
 
 
-def get_mem_cell_order(run, sources) -> str:
-    """Load the memory cell order to use as a condition to find constants"""
-    res = set()
+def get_mem_cell_pattern(run, sources) -> np.ndarray:
+    """Load the memory cell order to use as a condition to find constants
+
+    This looks at the first train for each source, issuing a warning if the
+    pattern differs between sources.
+    """
+    patterns = []
     for source in sources:
         cell_id_data = run[source, 'image.cellId'].drop_empty_trains()
         if len(cell_id_data.train_ids) == 0:
             continue  # No data for this module
-        cell_ids = cell_id_data[0].ndarray()
-        # Trailing comma required so e.g. "...,1" doesn't match "...,10"
-        res.add(",".join([str(c) for c in cell_ids.flatten()]) + ",")
+        cell_ids = cell_id_data[0].ndarray().flatten()
+        if not any(np.array_equal(cell_ids, p) for p in patterns):
+            patterns.append(cell_ids)
 
-    if len(res) > 1:
+    if len(patterns) > 1:
         warn("Memory cell order varies between detector modules: "
-             "; ".join([f"{s[:10]}...{s[-10:]}" for s in res]))
-    elif not res:
+             "; ".join([f"{s[:10]}...{s[-10:]}" for s in patterns]))
+    elif not patterns:
         raise ValueError("Couldn't find memory cell order for any modules")
 
-    return res.pop()
+    return patterns[0]
+
+
+def make_cell_order_condition(use_param, cellid_pattern) -> Optional[str]:
+    """Convert the cell ID array to a condition string, or None if not used"""
+    if use_param == 'auto':
+        # auto -> use cell order if it wraps around (cells not filled monotonically)
+        use = len(cellid_pattern) > 2 and (
+                np.diff(cellid_pattern.astype(np.int32)) < 0
+        ).any()
+    else:
+        use = (use_param == 'always')
+
+    return (",".join([str(c) for c in cellid_pattern]) + ",") if use else None
diff --git a/src/cal_tools/plotting.py b/src/cal_tools/plotting.py
index 1e49a548219ff39d0f254b1cf74467d392d20dc7..8638d957de82e7b665880cc3bb1b04e5cc6649cf 100644
--- a/src/cal_tools/plotting.py
+++ b/src/cal_tools/plotting.py
@@ -415,7 +415,7 @@ def init_jungfrau_geom(
     karabo_da: List[str]
     ) -> Tuple[List[str], JUNGFRAUGeometry]:
     """ Initiate JUNGFRAUGeometry object based on the selected detector
-    (SPB_IRDA_JF4M, FXE_XAD_JF1M, or a single module detector).
+    (JF4M, JF1M, or JF500K detectors).
     
     :param karabo_id: the detector identifer of an expected multimodular
         detector or a single module detector.
@@ -429,7 +429,7 @@ def init_jungfrau_geom(
     mod_width = (256 * 4) + (2 * 3)  # inc. 2px gaps between tiles
     mod_height = (256 * 2) + 2
 
-    if karabo_id == "SPB_IRDA_JF4M":
+    if "JF4M" in karabo_id:
         nmods = 8
         expected_modules = [f"JNGFR{i:02d}" for i in range(1, nmods+1)]
         # The first 4 modules are rotated 180 degrees relative to the others.
@@ -445,12 +445,16 @@ def init_jungfrau_geom(
         ]
         orientations = [
             (-1, -1) for _ in range(4)] + [(1, 1) for _ in range(4)]
-    elif karabo_id == "FXE_XAD_JF1M":
+    elif "JF1M" in karabo_id:
         nmods = 2
-        expected_modules = [f"JNGFR{i:02d}" for i in range(1, nmods+1)]
+        st_modno = 1
+        # TODO: This is a temporary workaround. A proper solution is needed.
+        if karabo_id == "SPB_CFEL_JF1M":
+            st_modno = 9
+        expected_modules = [f"JNGFR{i:02d}" for i in range(st_modno, st_modno+nmods)]
         module_pos = ((-mod_width//2, 33), (-mod_width//2, -mod_height-33))
         orientations = [(-1,-1), (1,1)]
-    else:
+    else:  # e.g. HED_IA1_JF500K1, FXE_XAD_JF500K, FXE_XAD_JFHZ
         nmods = 1
         expected_modules = karabo_da
         module_pos = ((-mod_width//2, -mod_height//2),)
diff --git a/src/cal_tools/tools.py b/src/cal_tools/tools.py
index 2d15866ac2517bc0925e2b6ad5bad10560f5ab36..88ab28d1e03c23264bb77c1c9a0e6b707a1535a1 100644
--- a/src/cal_tools/tools.py
+++ b/src/cal_tools/tools.py
@@ -10,6 +10,7 @@ from os import environ, listdir, path
 from os.path import isfile
 from pathlib import Path
 from queue import Queue
+from tempfile import NamedTemporaryFile
 from time import sleep
 from typing import List, Optional, Tuple, Union
 from urllib.parse import urljoin
@@ -476,7 +477,7 @@ def get_pdu_from_db(karabo_id: str, karabo_da: Union[str, list],
                      intialize CalibrationConstantMetadata class.
     :param condition: Detector condition object to
                       intialize CalibrationConstantMetadata class.
-    :param cal_db_interface: Interface string, e.g. "tcp://max-exfl016:8015".
+    :param cal_db_interface: Interface string, e.g. "tcp://max-exfl-cal001:8015".
     :param snapshot_at: Database snapshot.
     :param timeout: Calibration Database timeout.
     :return: List of physical detector units (db_modules)
@@ -556,7 +557,7 @@ def get_from_db(karabo_id: str, karabo_da: str,
     :param constant: Calibration constant known for given detector.
     :param condition: Calibration condition.
     :param empty_constant: Constant to be returned in case of failure.
-    :param cal_db_interface: Interface string, e.g. "tcp://max-exfl016:8015"
+    :param cal_db_interface: Interface string, e.g. "tcp://max-exfl-cal001:8015"
     :param creation_time: Latest time for constant to be created.
     :param verbosity: Level of verbosity (0 - silent)
     :param timeout: Timeout for zmq request
@@ -668,7 +669,7 @@ def send_to_db(db_module: str, karabo_id: str, constant, condition,
     :param file_loc: Location of raw data.
     :param report_path: xfel-calbrate report path to inject along with
         the calibration constant versions to the database.
-    :param cal_db_interface: Interface string, e.g. "tcp://max-exfl016:8015"
+    :param cal_db_interface: Interface string, e.g. "tcp://max-exfl-cal001:8015"
     :param creation_time: Latest time for constant to be created
     :param timeout: Timeout for zmq request
     :param ntries: number of tries to contact the database,
@@ -811,6 +812,24 @@ def module_index_to_qm(index: int, total_modules: int = 16):
     return f"Q{quad+1}M{mod+1}"
 
 
+def recursive_update(target: dict, source: dict):
+    """Recursively merge source into target, checking for conflicts
+
+    Conflicting entries will not be copied to target. Returns True if any
+    conflicts were found.
+    """
+    conflict = False
+    for k, v2 in source.items():
+        v1 = target.get(k, None)
+        if isinstance(v1, dict) and isinstance(v2, dict):
+            conflict = recursive_update(v1, v2) or conflict
+        elif (v1 is not None) and (v1 != v2):
+            conflict = True
+        else:
+            target[k] = v2
+
+    return conflict
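+
+# Example behaviour (illustrative):
+#   d = {"a": {"x": 1}}
+#   recursive_update(d, {"a": {"y": 2}, "b": 3})  # False; "y" and "b" merged
+#   recursive_update(d, {"b": 4})                 # True; "b" keeps value 3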
+
 class CalibrationMetadata(dict):
     """Convenience class: dictionary stored in metadata YAML file
 
@@ -847,6 +866,34 @@ class CalibrationMetadata(dict):
         with (copy_dir / self._yaml_fn.name).open("w") as fd:
             yaml.safe_dump(dict(self), fd)
 
+    def add_fragment(self, data: dict):
+        """Save metadata to a separate 'fragment' file to be merged later
+
+        Avoids a risk of corrupting the main file by writing in parallel.
+        """
+        prefix = f"metadata_frag_j{os.environ.get('SLURM_JOB_ID', '')}_"
+        with NamedTemporaryFile("w", dir=self._yaml_fn.parent,
+                    prefix=prefix, suffix='.yml', delete=False) as fd:
+            yaml.safe_dump(data, fd)
+
+    def gather_fragments(self):
+        """Merge in fragments saved by add_fragment(), then delete them"""
+        frag_files = list(self._yaml_fn.parent.glob('metadata_frag_*.yml'))
+        to_delete = []
+        for fn in frag_files:
+            with fn.open("r") as fd:
+                data = yaml.safe_load(fd)
+                if recursive_update(self, data):
+                    print(f"{fn} contained conflicting metadata. "
+                          f"This file will be left for debugging")
+                else:
+                    to_delete.append(fn)
+
+        self.save()
+
+        for fn in to_delete:
+            fn.unlink()
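+
+    # Typical flow (illustrative): parallel jobs each call add_fragment()
+    # with partial metadata; the finalize step then calls gather_fragments()
+    # once to merge every metadata_frag_*.yml into the main YAML file.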
+
 
 def save_constant_metadata(
     retrieved_constants: dict,
@@ -915,6 +962,36 @@ def load_specified_constants(
     return const_data, when
 
 
+def write_constants_fragment(
+        out_folder: Path,
+        det_metadata: dict,
+        caldb_root: Path,
+):
+    """Record calibration constants metadata to a fragment file.
+
+    Args:
+        out_folder (Path): The output folder to store the fragment file.
+        det_metadata (dict): A dictionary with the desired detector metadata.
+            {karabo_da: {constant_name: metadata}}
+        caldb_root (Path): The calibration database root path for constant files.
+    """
+    metadata = {"retrieved-constants": {}}
+    for karabo_da, const_metadata in det_metadata.items():
+        mod_metadata = {}
+        mod_metadata["constants"] = {
+            cname: {
+                "path": str(caldb_root / ccv_metadata["path"]),
+                "dataset": ccv_metadata["dataset"],
+                "creation-time": ccv_metadata["begin_validity_at"],
+                "ccv_id": ccv_metadata["ccv_id"],
+            } for cname, ccv_metadata in const_metadata.items()
+        }
+        mod_metadata["physical-name"] = list(
+                const_metadata.values())[0]["physical_name"]
+        metadata["retrieved-constants"][karabo_da] = mod_metadata
+    CalibrationMetadata(out_folder).add_fragment(metadata)
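+
+# The fragment saved above has this YAML shape (names illustrative):
+#   retrieved-constants:
+#     <karabo_da>:
+#       constants:
+#         <constant name>:
+#           ccv_id: <id>
+#           creation-time: <begin_validity_at>
+#           dataset: <dataset within the file>
+#           path: <caldb_root>/<constant file>.h5
+#       physical-name: <physical detector unit name>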
+
+
 def write_compressed_frames(
         arr: np.ndarray,
         ofile: h5py.File,
@@ -953,3 +1030,17 @@ def write_compressed_frames(
             dataset.id.write_direct_chunk(chunk_start, compressed)
 
     return dataset
+
+
+def reorder_axes(a, from_order, to_order):
+    """Rearrange axes of array a from from_order to to_order
+
+    This does the same as np.transpose(), but making the before & after axes
+    more explicit. from_order is a sequence of strings labelling the axes of a,
+    and to_order is a similar sequence for the axes of the result.
+    """
+    assert len(from_order) == a.ndim
+    assert sorted(from_order) == sorted(to_order)
+    from_order = list(from_order)
+    order = tuple([from_order.index(lbl) for lbl in to_order])
+    return a.transpose(order)
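+
+
+# Example (illustrative): move a (cells, x, y) stack to (x, y, cells):
+#   a = np.zeros((352, 512, 128))
+#   reorder_axes(a, ('cells', 'x', 'y'), ('x', 'y', 'cells')).shape
+#   # -> (512, 128, 352)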
diff --git a/src/xfel_calibrate/calibrate.py b/src/xfel_calibrate/calibrate.py
index 53f0cba19e70b05503835ca946f58c98f22713f1..5590e4c34f5f6c9c10d3700260adfb3c3e1ea2f2 100755
--- a/src/xfel_calibrate/calibrate.py
+++ b/src/xfel_calibrate/calibrate.py
@@ -351,7 +351,9 @@ def prepare_job(
         if title_cell is not None:
             title_cell.source = ''
     set_figure_format(new_nb, args["vector_figs"])
-    new_name = f"{nb_path.stem}__{cparm}__{suffix}.ipynb"
+    # In some cases the suffix can contain `/`, e.g. LPDMini and GH2
+    # have karabo_da values containing a `/`.
+    new_name = f"{nb_path.stem}__{cparm}__{suffix}.ipynb".replace("/", "-")
 
     nbformat.write(new_nb, cal_work_dir / new_name)
 
@@ -491,10 +493,30 @@ def make_par_table(parms):
     # Add space in long strings without line breakers ` ,-/` to
     # wrap them in latex
     def split_len(seq, length):
+        """
+        Split a sequence into segments of a specified length and
+        rejoin them with line-breaking characters so that LaTeX
+        can wrap the resulting line.
+
+        Args:
+            seq (str): The sequence to be split.
+            length (int): The desired length of each segment.
+
+        Returns:
+            str: The concatenated line with line-breaking characters.
+
+        Examples:
+            >>> split_len("slurm_prof_230711_095647.832671_0", 10)
+            'slurm_prof\\-\\_230711_09\\-5647.83267\\-1_0\\-'
+        """
         lbc = set(' ,-/')
         line = ''
         for i in range(0, len(seq), length):
             sub_line = seq[i:i + length]
+            # Ensure a proper line break if the new
+            # segment begins with `_`
+            if sub_line[0] == '_' and line[-1] == "-":
+                line += '\\'
             line += sub_line.replace('/', '/\-')
             if not any(c in lbc for c in sub_line):
                 line += '\-'
@@ -581,7 +603,11 @@ def run(argv=None):
 
     title = title.rstrip()
 
-    run_uuid = f"t{datetime.now().strftime('%y%m%d_%H%M%S.%f')}"
+    # request_time is in local timezone
+    if args["request_time"] == "Now":
+        request_time = datetime.now()
+    else:
+        request_time = datetime.fromisoformat(args["request_time"])
 
     # check if concurrency parameter is given and we run concurrently
     if concurrency_par is not None and not any(
@@ -594,8 +620,7 @@ def run(argv=None):
     default_params_by_name = {p.name: p.value for p in nb_details.default_params}
     if 'cluster_profile' in default_params_by_name:
         if args.get("cluster_profile") == default_params_by_name["cluster_profile"]:
-            args['cluster_profile'] = "slurm_prof_{}".format(run_uuid)
-
+            args['cluster_profile'] = f"slurm_prof_{request_time:%y%m%d_%H%M%S.%f}"
 
     # wait on all jobs to run and then finalize the run by creating a report from the notebooks
     out_path = Path(default_report_path) / nb_details.detector / nb_details.caltype / datetime.now().isoformat()
@@ -607,26 +632,22 @@ def run(argv=None):
 
     out_path.mkdir(parents=True, exist_ok=True)
 
-    # Use given report name, falling back to notebook title
+    # Use given report name, or automatic unique name if not specified
+    det_name = args.get('karabo_id', nb_details.detector)
+    unique_name = f"{det_name}-{nb_details.caltype}-{request_time:%y%m%d_%H%M%S.%f}"
     if args['skip_report']:
         report_to = ''
     elif args["report_to"] is None:
-        report_to = out_path / title.replace(" ", "")
+        report_to = out_path / f"{unique_name}.pdf"
         print(f"report_to not specified, will use {report_to}")
     else:
-        report_to = Path(args["report_to"])
-        if report_to.is_dir():
-            print(f"report_to is directory, will use title '{title}' for filename")
-            report_to = report_to / title.replace(" ", "")
-        elif len(report_to.parts) == 1:
-            print(f"report_to path contained no path, saving report in '{out_path}'")
-            report_to = out_path / report_to
-
-    workdir_name = f"slurm_out_{nb_details.detector}_{nb_details.caltype}_{run_uuid}"
+        report_to = Path(args["report_to"]).with_suffix('.pdf').absolute()
+
     if report_to:
-        cal_work_dir = report_to.parent / workdir_name
+        # Work dir matching report file but without .pdf
+        cal_work_dir = report_to.with_suffix('')
     else:
-        cal_work_dir = out_path / workdir_name
+        cal_work_dir = out_path / unique_name
     cal_work_dir.mkdir(parents=True)
 
     # Write all input parameters to rst file to be included to final report
@@ -658,7 +679,7 @@ def run(argv=None):
         parm_subdict[name] = p.value
 
     metadata["pycalibration-version"] = version
-    metadata["report-path"] = f"{report_to}.pdf" if report_to \
+    metadata["report-path"] = str(report_to) if report_to \
         else '# REPORT SKIPPED #'
     metadata['reproducible'] = not args['not_reproducible']
     metadata["concurrency"] = {
@@ -687,11 +708,6 @@ def run(argv=None):
 
     folder = get_par_attr(parms, 'in_folder', 'value', '')
 
-    if args["request_time"] == "Now":
-        request_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
-    else:
-        request_time = args["request_time"]
-
     pre_jobs = []
     cluster_cores = concurrency.get("cluster cores", 8)
     # Check if there are pre-notebooks
@@ -754,6 +770,9 @@ def run(argv=None):
         cvtype = get_par_attr(parms, concurrency_par, 'type', list)
         cvals = remove_duplications(cvals)
 
+        if not cvals:
+            raise ValueError("Splitting data for concurrency gave 0 jobs")
+
         for cnum, cval in enumerate(cvals):
             show_title = cnum == 0
             cval = [cval, ] if not isinstance(cval, list) and cvtype is list else cval
@@ -821,7 +840,7 @@ def run(argv=None):
                 'author': author,
                 'report_to': report_to,
                 'in_folder': folder,
-                'request_time': request_time,
+                'request_time': request_time.strftime("%Y-%m-%dT%H:%M:%S"),
                 'submission_time': submission_time,
                 }
 
diff --git a/src/xfel_calibrate/finalize.py b/src/xfel_calibrate/finalize.py
index 0911f0afb7ac63ab9643009b33527da6e666536a..41b3f9dd4b84f61a666647064959b966ba915543 100644
--- a/src/xfel_calibrate/finalize.py
+++ b/src/xfel_calibrate/finalize.py
@@ -379,6 +379,7 @@ def finalize(joblist, finaljob, cal_work_dir, out_path, version, title, author,
     if finaljob:
         joblist.append(str(finaljob))
     metadata = cal_tools.tools.CalibrationMetadata(cal_work_dir)
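+    # Merge metadata fragment files (metadata_frag*.yml) written by the
+    # calibration jobs back into the main calibration metadata.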
+    metadata.gather_fragments()
 
     job_time_fmt = 'JobID,Start,End,Elapsed,Suspended,State'.split(',')
     job_time_summary = get_job_info(joblist, job_time_fmt)
@@ -413,7 +414,7 @@ def finalize(joblist, finaljob, cal_work_dir, out_path, version, title, author,
             version,
             report_to,
         )
-        det = metadata['calibration-configurations'].get('karabo-id', report_to.name)
+        det = metadata['calibration-configurations'].get('karabo-id', report_to.stem)
     else:
         try:
             det = metadata['calibration-configurations']['karabo-id']
diff --git a/src/xfel_calibrate/nb_args.py b/src/xfel_calibrate/nb_args.py
index e3e1dc5e2adbdd040bb566790064672321a52bd5..93c6eba71ed8b0df2bbb22bcca83d2652a7434d0 100644
--- a/src/xfel_calibrate/nb_args.py
+++ b/src/xfel_calibrate/nb_args.py
@@ -53,8 +53,7 @@ def make_initial_parser(**kwargs):
                         help="Prepare notebooks but don't run them")
 
     parser.add_argument('--report-to', type=str,
-                        help='Filename (and optionally path) for output'
-                             ' report')
+                        help='Full path for the PDF report output')
 
     parser.add_argument('--not-reproducible', action='store_true',
                         help='Disable checks to allow the processing result '
diff --git a/src/xfel_calibrate/notebooks.py b/src/xfel_calibrate/notebooks.py
index 55f1113a4cbb2f25178c60cea6cf67c5d4bc3b36..3d241b27e7f1d41050ce49b28be9823611d9040d 100644
--- a/src/xfel_calibrate/notebooks.py
+++ b/src/xfel_calibrate/notebooks.py
@@ -28,7 +28,6 @@ notebooks = {
                             "cluster cores": 16},
         },
         "CORRECT": {
-            "pre_notebooks": ["notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb"],
             "notebook": "notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb",
             "dep_notebooks": [
                 "notebooks/AGIPD/AGIPD_Correct_and_Verify_Summary_NBC.ipynb"],
@@ -76,8 +75,6 @@ notebooks = {
                             "cluster cores": 8},
         },
         "CORRECT": {
-            "pre_notebooks": [
-                "notebooks/LPD/LPD_retrieve_constants_precorrection.ipynb"],
             "notebook": "notebooks/LPD/LPD_Correct_Fast.ipynb",
             "concurrency": {"parameter": "sequences",
                             "default concurrency": [-1],
@@ -98,6 +95,25 @@ notebooks = {
                             "cluster cores": 1},
         }
     },
+    "LPDMINI": {
+        "DARK": {
+            "notebook": "notebooks/LPDMini/LPD_Mini_Char_Darks_NBC.ipynb",
+            "concurrency": {"parameter": None},
+        },
+        "CORRECT": {
+            "notebook": "notebooks/LPDMini/LPD_Mini_Correct.ipynb",
+            "concurrency": {"parameter": "sequences",
+                            "default concurrency": [-1],
+                            "use function": "balance_sequences",
+                            "cluster cores": 16},
+        },
+        "INJECT_CONSTANTS": {
+            "notebook": "notebooks/LPDMini/LPD_Mini_Inject_calibration_constants_from_h5files.ipynb",
+            "concurrency": {"parameter": None,
+                            "default concurrency": None,
+                            "cluster cores": 1},
+        }
+    },
     "PNCCD": {
         "DARK": {
             "notebook": "notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb",
@@ -112,7 +128,6 @@ notebooks = {
                             "cluster cores": 32},
         },
         "CORRECT": {
-            "pre_notebooks": ["notebooks/pnCCD/pnCCD_retrieve_constants_precorrection.ipynb"],
             "notebook": "notebooks/pnCCD/Correct_pnCCD_NBC.ipynb",
             "concurrency": {"parameter": "sequences",
                             "default concurrency": [-1],
@@ -179,8 +194,6 @@ notebooks = {
                             "cluster cores": 4},
         },
         "CORRECT": {
-            "pre_notebooks": [
-                "notebooks/Jungfrau/Jungfrau_retrieve_constants_precorrection_NBC.ipynb"],  # noqa
             "notebook":
                 "notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb",
             "concurrency": {"parameter": "sequences",
@@ -191,8 +204,6 @@ notebooks = {
     },
     "GOTTHARD2": {
         "CORRECT": {
-            "pre_notebooks": [
-                "notebooks/Gotthard2/Gotthard2_retrieve_constants_precorrection_NBC.ipynb"],  # noqa
             "notebook":
                 "notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb",
             "concurrency": {"parameter": "sequences",
@@ -217,13 +228,18 @@ notebooks = {
         },
 
         "CORRECT": {
-            "pre_notebooks": ["notebooks/ePix100/ePix100_retrieve_constants_precorrection.ipynb"],
             "notebook": "notebooks/ePix100/Correction_ePix100_NBC.ipynb",
             "concurrency": {"parameter": "sequences",
                             "default concurrency": [-1],
                             "use function": "balance_sequences",
                             "cluster cores": 4},
         },
+        "FF": {
+            "notebook": "notebooks/ePix100/Characterize_FlatFields_ePix100_NBC.ipynb",
+            "concurrency": {"parameter": None,
+                            "default concurrency": None,
+                            "cluster cores": 4},
+        },
     },
     "EPIX10K": {
         "DARK": {
@@ -273,6 +289,17 @@ notebooks = {
             },
         },
     },
+    "TIMEPIX": {
+        "CORRECT": {
+            "notebook": "notebooks/Timepix/Compute_Timepix_Event_Centroids.ipynb",
+            "concurrency": {
+                "parameter": None,
+                "use function": None,
+                "default concurrency": None,
+                "cluster cores": 1
+            },
+        },
+    },
     "TEST": {
         "TEST-CLI": {
             "notebook": "notebooks/test/test-cli.ipynb",
diff --git a/src/xfel_calibrate/repeat.py b/src/xfel_calibrate/repeat.py
index e8d94134f3b8a46384f1272b754fea66c070b80c..3112a960f5f428cce261595347f36c10bdc5739f 100644
--- a/src/xfel_calibrate/repeat.py
+++ b/src/xfel_calibrate/repeat.py
@@ -13,7 +13,6 @@ from cal_tools.tools import CalibrationMetadata
 from .calibrate import (
     JobChain, SlurmOptions, run_finalize, get_pycalib_version,
 )
-from .settings import temp_path
 
 # This function is copied and modified from Python 3.8.10
 # Copyright © 2001-2022 Python Software Foundation; All Rights Reserved
@@ -123,30 +122,35 @@ def main(argv=None):
     start_time = datetime.now()
     run_uuid = f"t{start_time:%y%m%d_%H%M%S}"
 
-    cal_work_dir = Path(temp_path, f'slurm_out_repeat_{run_uuid}')
-    copytree_no_metadata(
-        args.from_dir, cal_work_dir, ignore=shutil.ignore_patterns('slurm-*.out')
-    )
-    print(f"New working directory: {cal_work_dir}")
-
-    cal_metadata = CalibrationMetadata(cal_work_dir)
-    parameters = cal_metadata['calibration-configurations']
+    parameters = CalibrationMetadata(args.from_dir)['calibration-configurations']
+    karabo_id = parameters['karabo-id']
 
     out_folder = parameters['out-folder']
     params_to_set = {'metadata_folder': "."}
     if args.out_folder:
         out_folder = parameters['out-folder'] = os.path.abspath(args.out_folder)
         params_to_set['out_folder'] = out_folder
-    update_notebooks_params(cal_work_dir, params_to_set)
 
     if args.report_to:
-        report_to = os.path.abspath(args.report_to)
+        report_to = Path(args.report_to).with_suffix('.pdf').absolute()
     else:  # Default to saving report in output folder
-        report_to = str(Path(out_folder, f'xfel-calibrate-repeat-{run_uuid}'))
-    cal_metadata['report-path'] = f'{report_to}.pdf'
+        report_to = Path(out_folder, f'{karabo_id}-repeat-{run_uuid}.pdf')
 
+    # Copy working directory to new location
+    cal_work_dir = report_to.with_suffix('')
+    copytree_no_metadata(
+        args.from_dir, cal_work_dir, ignore=shutil.ignore_patterns('slurm-*.out')
+    )
+    print(f"New working directory: {cal_work_dir}")
+
+    # Update metadata YAML file & notebooks with any changes
+    cal_metadata = CalibrationMetadata(cal_work_dir)
+    cal_metadata['calibration-configurations'] = parameters
+    cal_metadata['report-path'] = str(report_to)
     cal_metadata.save()
 
+    update_notebooks_params(cal_work_dir, params_to_set)
+
     # finalize & some notebooks expect yaml metadata in the output folder
     Path(out_folder).mkdir(parents=True, exist_ok=True)
     shutil.copy(cal_work_dir / 'calibration_metadata.yml', out_folder)
diff --git a/tests/conftest.py b/tests/conftest.py
index e5a45a293cfd107829309ce18e5f10c914a05107..3c34eb25f4e2668daf2695f34db0b36d052ec3f6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -69,7 +69,7 @@ def pytest_configure(config):
 
 
 @lru_cache()
-def server_reachable(server: str = "max-exfl017"):
+def server_reachable(server: str = "max-exfl-cal002"):
     reachable = True
 
     try:
diff --git a/tests/pytest.ini b/tests/pytest.ini
new file mode 100644
index 0000000000000000000000000000000000000000..74e59734360638a1a5a894bb5ae9647dbd8e22a5
--- /dev/null
+++ b/tests/pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+asyncio_mode = strict
+log_cli = 1
+log_cli_level = INFO
+log_cli_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)
+log_cli_date_format = %Y-%m-%d %H:%M:%S
\ No newline at end of file
diff --git a/tests/test_agipdlib.py b/tests/test_agipdlib.py
index ad1a92f42f956d291adebedbfb6df61a929d4e32..89ef4054df964fb3fc4b8a44f32b267dd66ac702 100644
--- a/tests/test_agipdlib.py
+++ b/tests/test_agipdlib.py
@@ -1,11 +1,14 @@
 from datetime import datetime
 
+import pytest
 from extra_data import RunDirectory
 
-from cal_tools.agipdlib import AgipdCtrl
+from cal_tools.agipdlib import AgipdCtrl, AgipdCtrlRuns
+from cal_tools.enums import AgipdGainMode
 
 SPB_AGIPD_INST_SRC = 'SPB_DET_AGIPD1M-1/DET/0CH0:xtdf'
-CTRL_SRC = 'SPB_IRU_AGIPD1M1/MDL/FPGA_COMP'
+SPB_AGIPD_KARABO_CTRL_ID = 'SPB_IRU_AGIPD1M1'
+CTRL_SRC = f'{SPB_AGIPD_KARABO_CTRL_ID}/MDL/FPGA_COMP'
 
 
 def test_get_acq_rate_ctrl(mock_agipd1m_run):
@@ -136,8 +139,8 @@ def test_get_bias_voltage(
     # Read bias voltage for HED_DET_AGIPD500K from slow data.
     agipd_ctrl = AgipdCtrl(
         run_dc=RunDirectory(mock_agipd500k_run),
-        image_src=SPB_AGIPD_INST_SRC,
-        ctrl_src=CTRL_SRC)
+        image_src=None,
+        ctrl_src=None)
     bias_voltage = agipd_ctrl.get_bias_voltage(
         karabo_id_control="HED_EXP_AGIPD500K2G")
 
@@ -198,3 +201,128 @@ def test_get_gain_mode(mock_agipd1m_run):
     gain_mode = agipd_ctrl.get_gain_mode()
     assert isinstance(gain_mode, int)
     assert gain_mode == 0
+
+
+"""Testing `AgipdCtrlRuns`"""
+
+TEST_RAW_FOLDER = "/gpfs/exfel/exp/CALLAB/202130/p900203/raw/"
+SPB_FIXED_RUNS = [9011, 9012, 9013]
+SPB_ADAPTIVE_RUNS = [9015, 9016, 9017]
+
+FIXED_CTRL_RUNS = AgipdCtrlRuns(
+    raw_folder=TEST_RAW_FOLDER,
+    runs=SPB_FIXED_RUNS,
+    image_src=SPB_AGIPD_INST_SRC,
+    ctrl_src=CTRL_SRC,
+)
+ADAPTIVE_CTRL_RUNS = AgipdCtrlRuns(
+    raw_folder=TEST_RAW_FOLDER,
+    runs=SPB_ADAPTIVE_RUNS,
+    image_src=SPB_AGIPD_INST_SRC,
+    ctrl_src=CTRL_SRC,
+)
+
+
+@pytest.mark.requires_gpfs
+def test_get_memory_cells_runs():
+    assert FIXED_CTRL_RUNS.get_memory_cells() == 352
+
+    assert ADAPTIVE_CTRL_RUNS.get_memory_cells() == 352
+
+
+@pytest.mark.requires_gpfs
+def test_get_bias_voltage_runs():
+    assert FIXED_CTRL_RUNS.get_bias_voltage(SPB_AGIPD_KARABO_CTRL_ID) == 300
+
+    assert ADAPTIVE_CTRL_RUNS.get_bias_voltage(SPB_AGIPD_KARABO_CTRL_ID) == 300
+
+
+@pytest.mark.requires_gpfs
+def test_get_integration_time_runs():
+    assert FIXED_CTRL_RUNS.get_integration_time() == 12
+
+    assert ADAPTIVE_CTRL_RUNS.get_integration_time() == 20
+
+
+@pytest.mark.requires_gpfs
+def test_get_acquisition_rate_runs():
+    assert FIXED_CTRL_RUNS.get_acq_rate() == 1.1
+
+    assert ADAPTIVE_CTRL_RUNS.get_acq_rate() == 1.1
+
+
+@pytest.mark.requires_gpfs
+def test_get_gain_setting_runs():
+    assert FIXED_CTRL_RUNS.get_gain_setting() == 0
+
+    assert ADAPTIVE_CTRL_RUNS.get_gain_setting() == 0
+
+
+@pytest.mark.requires_gpfs
+def test_get_gain_mode_runs():
+    assert FIXED_CTRL_RUNS.get_gain_modes() == [
+        AgipdGainMode.FIXED_HIGH_GAIN,
+        AgipdGainMode.FIXED_MEDIUM_GAIN,
+        AgipdGainMode.FIXED_LOW_GAIN
+    ]
+
+    assert ADAPTIVE_CTRL_RUNS.get_gain_modes() == [
+        AgipdGainMode.ADAPTIVE_GAIN]*3
+
+
+@pytest.mark.requires_gpfs
+def test_fixed_gain_mode():
+    assert FIXED_CTRL_RUNS.fixed_gain_mode()
+
+    assert not ADAPTIVE_CTRL_RUNS.fixed_gain_mode()
+
+
+@pytest.mark.requires_gpfs
+def test_raise_fixed_gain_mode():
+    adaptive_fixed_ctrls = AgipdCtrlRuns(
+        raw_folder=TEST_RAW_FOLDER,
+        runs=[9011, 9016, 9017],
+        image_src=SPB_AGIPD_INST_SRC,
+        ctrl_src=CTRL_SRC,
+    )
+    with pytest.raises(ValueError):
+        adaptive_fixed_ctrls.fixed_gain_mode()
+
+
+@pytest.mark.requires_gpfs
+@pytest.mark.parametrize(
+    "runs,expected",
+    [
+        ([9013, 9011, 9012], [9011, 9012, 9013]),
+        ([9017, 9016, 9015], [9015, 9016, 9017]),
+    ],
+)
+def test_sort_dark_runs(runs, expected):
+    runs_ctrls = AgipdCtrlRuns(
+        raw_folder=TEST_RAW_FOLDER,
+        runs=runs,
+        image_src=SPB_AGIPD_INST_SRC,
+        ctrl_src=CTRL_SRC,
+    )
+    runs_ctrls.sort_dark_runs()
+    assert runs_ctrls.runs == expected
+
+
+def test_raise_sort_dark_runs():
+    with pytest.raises(ValueError):
+        adaptive_fixed_ctrls = AgipdCtrlRuns(
+            raw_folder=TEST_RAW_FOLDER,
+            runs=[9011, 9016, 9017],
+            image_src=SPB_AGIPD_INST_SRC,
+            ctrl_src=CTRL_SRC,
+            sort_dark_runs_enabled=True
+        )
+
+    adaptive_fixed_ctrls = AgipdCtrlRuns(
+        raw_folder=TEST_RAW_FOLDER,
+        runs=[9011, 9016, 9017],
+        image_src=SPB_AGIPD_INST_SRC,
+        ctrl_src=CTRL_SRC,
+    )
+    with pytest.raises(ValueError):
+        adaptive_fixed_ctrls.sort_dark_runs()
diff --git a/tests/test_cal_tools.py b/tests/test_cal_tools.py
index 7343c6f5b673514c01de002964668e0f140a0f72..3eb962b0ec1a12c29efd03584b77d5340cd277cf 100644
--- a/tests/test_cal_tools.py
+++ b/tests/test_cal_tools.py
@@ -4,11 +4,11 @@ from unittest.mock import patch
 
 import numpy as np
 import pytest
+import yaml
 import zmq
 from extra_data import open_run
 from iCalibrationDB import Conditions, ConstantMetaData, Constants
 
-from cal_tools.agipdlib import AgipdCorrections, CellRange
 from cal_tools.plotting import show_processed_modules
 from cal_tools.tools import (
     creation_date_file_metadata,
@@ -18,22 +18,23 @@ from cal_tools.tools import (
     get_pdu_from_db,
     map_seq_files,
     module_index_to_qm,
+    recursive_update,
+    reorder_axes,
     send_to_db,
+    write_constants_fragment,
 )
 
 # AGIPD operating conditions.
 ACQ_RATE = 1.1
 BIAS_VOLTAGE = 300
 GAIN_SETTING = 0
-INTEGRATION_TIME = 12
 MEM_CELLS = 352
-PHOTON_ENERGY = 9.2
 
 AGIPD_KARABO_ID = "SPB_DET_AGIPD1M-1"
 WRONG_AGIPD_MODULE = "AGIPD_**"
 
-CAL_DB_INTERFACE = "tcp://max-exfl017:8020"
-WRONG_CAL_DB_INTERFACE = "tcp://max-exfl017:0000"
+CAL_DB_INTERFACE = "tcp://max-exfl-cal002:8020"
+WRONG_CAL_DB_INTERFACE = "tcp://max-exfl-cal002:0000"
 
 PROPOSAL = 900113
 
@@ -402,61 +403,6 @@ def test_get_pdu_from_db(_agipd_const_cond):
                         "CAL_PHYSICAL_DETECTOR_UNIT-2_TEST"]
 
 
-# TODO add a marker for accessing zmq end_point
-@pytest.mark.requires_gpfs
-@pytest.mark.requires_caldb
-def test_initialize_from_db():
-    creation_time = datetime.strptime(
-        "2020-01-07 13:26:48.00", "%Y-%m-%d %H:%M:%S.%f")
-
-    agipd_corr = AgipdCorrections(
-        max_cells=MEM_CELLS,
-        cell_sel=CellRange([0, 500, 1], MEM_CELLS))
-
-    agipd_corr.allocate_constants(
-        modules=[0],
-        constant_shape=(3, MEM_CELLS, 512, 128))
-
-    dark_const_time_dict = agipd_corr.initialize_from_db(
-        karabo_id="TEST_DET_CAL_CI-1",
-        karabo_da="TEST_DET_CAL_DA1",
-        cal_db_interface=CAL_DB_INTERFACE,
-        creation_time=creation_time,
-        memory_cells=MEM_CELLS,
-        bias_voltage=BIAS_VOLTAGE,
-        photon_energy=PHOTON_ENERGY,
-        gain_setting=GAIN_SETTING,
-        acquisition_rate=ACQ_RATE,
-        integration_time=INTEGRATION_TIME,
-        module_idx=0,
-        only_dark=False,
-    )
-
-    assert dark_const_time_dict == {
-        "Offset": None,
-        "Noise": None,
-        "ThresholdsDark": None,
-        "BadPixelsDark": None,
-    }
-
-    dark_const_time_dict = agipd_corr.initialize_from_db(
-        karabo_id=AGIPD_KARABO_ID,
-        karabo_da="AGIPD00",
-        cal_db_interface=CAL_DB_INTERFACE,
-        creation_time=creation_time,
-        memory_cells=MEM_CELLS, bias_voltage=BIAS_VOLTAGE,
-        photon_energy=PHOTON_ENERGY, gain_setting=GAIN_SETTING,
-        integration_time=INTEGRATION_TIME,
-        acquisition_rate=ACQ_RATE, module_idx=0,
-        only_dark=False,
-    )
-
-    # A retrieved constant has a value of datetime creation_time
-    assert isinstance(dark_const_time_dict["Offset"], datetime)
-    assert list(dark_const_time_dict.keys()) == [
-        "Offset", "Noise", "ThresholdsDark", "BadPixelsDark"]
-
-
 def test_module_index_to_qm():
 
     assert module_index_to_qm(0) == 'Q1M1'
@@ -471,3 +417,153 @@ def test_module_index_to_qm():
 
     with pytest.raises(AssertionError):
         module_index_to_qm(7, 5)
+
+
+def test_recursive_update():
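+    # recursive_update merges src into tgt in place and returns True if a
+    # conflicting value was found (the existing value is kept in that case).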
+    tgt = {"a": {"b": 1}, "c": 2}
+    src = {"a": {"d": 3}, "e": 4}
+    assert recursive_update(tgt, src) is False
+    assert tgt == {"a": {"b": 1, "d": 3}, "c": 2, "e": 4}
+
+    tgt = {"a": {"b": 1}, "c": 2}
+    src = {"a": {"b": 3}, "e": 4}
+    assert recursive_update(tgt, src) is True
+    assert tgt == {"a": {"b": 1}, "c": 2, "e": 4}
+
+
+def test_write_constants_fragment(tmp_path: Path):
+    """Test `write_constants_fragment` with jungfrau.
+    This metadata is from constants used to correct FXE_XAD_JF1M
+    detector from proposal 900226, run 106.
+
+    tmp_path:
+        tmp_path (pathlib.Path): Temporary directory for file tests.
+        https://docs.pytest.org/en/7.1.x/how-to/tmp_path.html
+    """
+
+    jf_metadata = {
+        "JNGFR01": {
+            "Offset10Hz": {
+                "cc_id": 7064,
+                "cc_name": "jungfrau-Type_Offset10Hz_Jungfrau DefiFE6iJX",
+                "condition_id": 2060,
+                "ccv_id": 41876,
+                "ccv_name": "20200304_152733_sIdx=0",
+                "path": Path("xfel/cal/jungfrau-type/jungfrau_m233/cal.1583335651.8084984.h5"),
+                "dataset": "/Jungfrau_M233/Offset10Hz/0",
+                "begin_validity_at": "2020-03-04T15:16:34.000+01:00",
+                "end_validity_at": None,
+                "raw_data_location": "proposal:p900121 runs:136 137 138",
+                "start_idx": 0,
+                "end_idx": 0,
+                "physical_name": "Jungfrau_M233"},
+            "BadPixelsDark10Hz": {
+                "cc_id": 7066,
+                "cc_name": "jungfrau-Type_BadPixelsDark10Hz_Jungfrau DefiFE6iJX",
+                "condition_id": 2060,
+                "ccv_id": 41878,
+                "ccv_name": "20200304_152740_sIdx=0",
+                "path": Path("xfel/cal/jungfrau-type/jungfrau_m233/cal.1583335658.6813955.h5"),
+                "dataset": "/Jungfrau_M233/BadPixelsDark10Hz/0",
+                "begin_validity_at": "2020-03-04T15:16:34.000+01:00",
+                "end_validity_at": None,
+                "raw_data_location": "proposal:p900121 runs:136 137 138",
+                "start_idx": 0,
+                "end_idx": 0,
+                "physical_name": "Jungfrau_M233"
+                }
+            },
+        "JNGFR02": {
+            "Offset10Hz": {
+                "cc_id": 7067,
+                "cc_name": "jungfrau-Type_Offset10Hz_Jungfrau DefzgIVHz1",
+                "condition_id": 2061,
+                "ccv_id": 41889,
+                "ccv_name": "20200304_154434_sIdx=0",
+                "path": Path("xfel/cal/jungfrau-type/jungfrau_m125/cal.1583336672.760199.h5"),
+                "dataset": "/Jungfrau_M125/Offset10Hz/0",
+                "begin_validity_at": "2020-03-04T15:16:34.000+01:00",
+                "end_validity_at": None,
+                "raw_data_location": "proposal:p900121 runs:136 137 138",
+                "start_idx": 0,
+                "end_idx": 0,
+                "physical_name": "Jungfrau_M125",
+                },
+            "BadPixelsDark10Hz": {
+                "cc_id": 7069,
+                "cc_name": "jungfrau-Type_BadPixelsDark10Hz_Jungfrau DefzgIVHz1",
+                "condition_id": 2061,
+                "ccv_id": 41893,
+                "ccv_name": "20200304_154441_sIdx=0",
+                "path": Path("xfel/cal/jungfrau-type/jungfrau_m125/cal.1583336679.5835564.h5"),
+                "dataset": "/Jungfrau_M125/BadPixelsDark10Hz/0",
+                "begin_validity_at": "2020-03-04T15:16:34.000+01:00",
+                "end_validity_at": None,
+                "raw_data_location": "proposal:p900121 runs:136 137 138",
+                "start_idx": 0,
+                "end_idx": 0,
+                "physical_name": "Jungfrau_M125",
+                }
+            }
+        }
+
+    write_constants_fragment(
+        tmp_path,
+        jf_metadata,
+        Path("/gpfs/exfel/d/cal/caldb_store")
+    )
+    fragments = list(tmp_path.glob("metadata_frag*yml"))
+    assert len(fragments) == 1
+
+    # Open YAML file
+    with open(fragments[0], "r") as file:
+        # Load YAML content into dictionary
+        yaml_dict = yaml.safe_load(file)
+        assert yaml_dict == {
+            "retrieved-constants":{
+                "JNGFR01": {
+                    "constants": {
+                        "BadPixelsDark10Hz": {
+                            "ccv_id": 41878,
+                            "creation-time": "2020-03-04T15:16:34.000+01:00",
+                            "dataset": "/Jungfrau_M233/BadPixelsDark10Hz/0",
+                            "path": "/gpfs/exfel/d/cal/caldb_store/xfel/cal/jungfrau-type/jungfrau_m233/cal.1583335658.6813955.h5",  # noqa
+                        },
+                        "Offset10Hz": {
+                            "ccv_id": 41876,
+                            "creation-time": "2020-03-04T15:16:34.000+01:00",
+                            "dataset": "/Jungfrau_M233/Offset10Hz/0",
+                            "path": "/gpfs/exfel/d/cal/caldb_store/xfel/cal/jungfrau-type/jungfrau_m233/cal.1583335651.8084984.h5",  # noqa
+                        },
+                    },
+                    "physical-name": "Jungfrau_M233",
+                },
+                "JNGFR02": {
+                    "constants": {
+                        "BadPixelsDark10Hz": {
+                            "ccv_id": 41893,
+                            "creation-time": "2020-03-04T15:16:34.000+01:00",
+                            "dataset": "/Jungfrau_M125/BadPixelsDark10Hz/0",
+                            "path": "/gpfs/exfel/d/cal/caldb_store/xfel/cal/jungfrau-type/jungfrau_m125/cal.1583336679.5835564.h5",  # noqa
+                        },
+                        "Offset10Hz": {
+                            "ccv_id": 41889,
+                            "creation-time": "2020-03-04T15:16:34.000+01:00",
+                            "dataset": "/Jungfrau_M125/Offset10Hz/0",
+                            "path": "/gpfs/exfel/d/cal/caldb_store/xfel/cal/jungfrau-type/jungfrau_m125/cal.1583336672.760199.h5",  # noqa
+                        },
+                    },
+                    "physical-name": "Jungfrau_M125",
+                },
+            }
+        }
+
+
+def test_reorder_axes():
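+    # reorder_axes permutes the array so the axes named in from_order
+    # end up in the positions given by to_order.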
+    a = np.zeros((10, 32, 256, 3))
+    from_order = ('cells', 'slow_scan', 'fast_scan', 'gain')
+    to_order = ('slow_scan', 'fast_scan', 'cells', 'gain')
+    assert reorder_axes(a, from_order, to_order).shape == (32, 256, 10, 3)
+
+    to_order = ('gain', 'fast_scan', 'slow_scan', 'cells')
+    assert reorder_axes(a, from_order, to_order).shape == (3, 256, 32, 10)
diff --git a/tests/test_jungfraulib.py b/tests/test_jungfraulib.py
index eb7580ad72beaf422871516340648c754b59eaa1..00a3ae5b9f2e4761360733d95f62e308da08016e 100644
--- a/tests/test_jungfraulib.py
+++ b/tests/test_jungfraulib.py
@@ -1,7 +1,7 @@
 import pytest
 from extra_data import RunDirectory
 
-from cal_tools.jungfraulib import JungfrauCtrl
+from cal_tools.jungfrau.jungfraulib import JungfrauCtrl, sort_runs_by_gain
 
 # TODO: replace with mocked RAW data as in tests/test_agipdlib.py
 JF = JungfrauCtrl(
@@ -45,3 +45,22 @@ def test_get_gain_setting(settings, result):
 def test_get_gain_mode(mode, result):
     JF.run_mode = mode
     assert JF.get_gain_mode() == result
+
+# TODO: missing fixed-gain dark runs for JUNGFRAU in the test proposal.
+# TODO: missing fixed and adaptive runs after the JF control update.
+@pytest.mark.parametrize(
+    'original_runs,sorted_runs',
+    [
+        ([9035, 9036, 9037], [9035, 9036, 9037]),
+        ([9035, 9037, 9036], [9035, 9036, 9037]),
+        ([9033, 9032, 9031], [9031, 9032, 9033]),
+        ([9033, 9031, 9032], [9031, 9032, 9033]),
+    ]
+)
+def test_sort_runs_by_gain(original_runs, sorted_runs):
+    raw_folder = "/gpfs/exfel/exp/CALLAB/202130/p900203/raw"
+    validated_runs = sort_runs_by_gain(
+        raw_folder=raw_folder,
+        runs=original_runs,
+        ctrl_src="FXE_XAD_JF1M/DET/CONTROL")
+    assert validated_runs == sorted_runs
diff --git a/tests/test_reference_runs/__init__.py b/tests/test_reference_runs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/test_reference_runs/callab_tests.py b/tests/test_reference_runs/callab_tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..b566e8598b2d492f124829e5b24100cd6b7e8181
--- /dev/null
+++ b/tests/test_reference_runs/callab_tests.py
@@ -0,0 +1,684 @@
+automated_test_config = {
+    "SPB_DET_AGIPD1M-1-CORRECT-FIXED": {
+        "det_type": "AGIPD",
+        "cal_type": "CORRECT",
+        "config": {
+            "blc-stripes": True,
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SPB/202131/p900215/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9014",  # Original run: "95"
+            "karabo-id-control": "SPB_IRU_AGIPD1M1",
+            "karabo-id": "SPB_DET_AGIPD1M-1",
+            "slurm-mem": "750",
+            "sequences": "0",
+            "rel-gain": True,
+            "xray-gain": True,
+            "n-cores-files": 2,
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SPB_DET_AGIPD1M-1-DARK-FIXED": {
+        "det_type": "AGIPD",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SPB/202131/p900215/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            # Unsorted dark runs
+            "run-high": "9013",  # Original run "93"
+            "run-med": "9012",  # Original run: "92"
+            "run-low": "9011",  # Original run: "91"
+            "karabo-id-control": "SPB_IRU_AGIPD1M1",
+            "karabo-id": "SPB_DET_AGIPD1M-1",
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SPB_DET_AGIPD1M-1-CORRECT-ADAPTIVE": {
+        "det_type": "AGIPD",
+        "cal_type": "CORRECT",
+        "config": {
+            "blc-stripes": True,
+            "rel-gain": True,
+            "xray-gain": True,
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SPB/202131/p900215/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9018",  # Original run 262
+            "karabo-id-control": "SPB_IRU_AGIPD1M1",
+            "karabo-id": "SPB_DET_AGIPD1M-1",
+            "slurm-mem": "750",
+            "sequences": "0",
+            "n-cores-files": 2,
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SPB_DET_AGIPD1M-1-DARK-ADAPTIVE": {
+        "det_type": "AGIPD",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SPB/202131/p900215/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9015",  # Original run: "216",
+            "run-med": "9016",  # Original run: "217",
+            "run-low": "9017",  # Original run: "218",
+            "karabo-id-control": "SPB_IRU_AGIPD1M1",
+            "karabo-id": "SPB_DET_AGIPD1M-1",
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "MID_DET_AGIPD1M-1-CORRECT-ADAPTIVE": {
+        "det_type": "AGIPD",
+        "cal_type": "CORRECT",
+        "config": {
+            "blc-stripes": True,
+            "rel-gain": True,
+            "xray-gain": True,
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/MID/202121/p002929/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9022", # Original run: "21",
+            "karabo-id-control": "MID_EXP_AGIPD1M1",
+            "karabo-id": "MID_DET_AGIPD1M-1",
+            "slurm-mem": "750",
+            "sequences": "0,1",
+            "n-cores-files": 2,
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "MID_DET_AGIPD1M-1-CORRECT-SELECT_CELLS_CM": {
+        "det_type": "AGIPD",
+        "cal_type": "CORRECT",
+        "config": {
+            "blc-stripes": True,
+            "rel-gain": True,
+            "xray-gain": True,
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/MID/202121/p002929/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9022", # Original run: "21",
+            "max-pulses": [0, 160, 2],
+            "common-mode": True,
+            "karabo-id-control": "MID_EXP_AGIPD1M1",
+            "karabo-id": "MID_DET_AGIPD1M-1",
+            "slurm-mem": "750",
+            "sequences": "0,1",
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+            "n-cores-files": 2,
+            "sequences-per-node": 1,
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "MID_DET_AGIPD1M-1-CORRECT-SELECT_CELLS_NOCM": {
+        "det_type": "AGIPD",
+        "cal_type": "CORRECT",
+        "config": {
+            "blc-stripes": True,
+            "rel-gain": True,
+            "xray-gain": True,
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/MID/202121/p002929/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9022", # Original run: "21",
+            "max-pulses": [0, 160, 2],
+            "no-common-mode": True,
+            "karabo-id-control": "MID_EXP_AGIPD1M1",
+            "karabo-id": "MID_DET_AGIPD1M-1",
+            "slurm-mem": "750",
+            "sequences": "0,1",
+            "n-cores-files": 2,
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "MID_DET_AGIPD1M-1-DARK-ADAPTIVE": {
+        "det_type": "AGIPD",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/MID/202121/p002929/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9019",  # Original run: "4"
+            "run-med": "9020",  # Original run: "5"
+            "run-low": "9021",  # Original run: "6"
+            "karabo-id-control": "MID_EXP_AGIPD1M1",
+            "karabo-id": "MID_DET_AGIPD1M-1",
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "HED_DET_AGIPD500K2G-CORRECT-ADAPTIVE": {
+        "det_type": "AGIPD",
+        "cal_type": "CORRECT",
+        "config": {
+            "blc-stripes": True,
+            "rel-gain": True,
+            "xray-gain": True,
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202131/p900228/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9026", # Original run: "29",
+            "karabo-id-control": "HED_EXP_AGIPD500K2G",
+            "karabo-id": "HED_DET_AGIPD500K2G",
+            "slurm-mem": "750",
+            "n-cores-files": 2,
+            "sequences": "0,1",
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "HED_DET_AGIPD500K2G-DARK-ADAPTIVE": {
+        "det_type": "AGIPD",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202131/p900228/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            # Unsorted dark runs
+            "run-high": "9023", # Original run: "25",
+            "run-med": "9025",  # Original run: "27",
+            "run-low": "9024",  # Original run: "26",
+            "karabo-id-control": "HED_EXP_AGIPD500K2G",
+            "karabo-id": "HED_DET_AGIPD500K2G",
+            "ctrl-source-template": "{}/MDL/FPGA_COMP",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "FXE_XAD_JF500K-DARK-SINGLE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/FXE/202101/p002478/raw/"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9031",  # Original run: "47"
+            "run-med": "9032",  # Original run: "48"
+            "run-low": "9033",  # Original run: "49"
+            "karabo-id-control": "",
+            "karabo-id": "FXE_XAD_JF500K",
+            "karabo-da": "JNGFR03",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "FXE_XAD_JF500K-CORRECT-SINGLE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/FXE/202101/p002478/raw/"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9030",  # Original from: 52
+            "sequences": "0,50",
+            "karabo-id-control": "",
+            "karabo-id": "FXE_XAD_JF500K",
+            "karabo-da": "JNGFR03",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "FXE_XAD_JF1M-DARK-BURST": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/FXE/202131/p900226/raw/"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9035",  # Original run: "108"
+            "run-med": "9036",  # Original run: "109"
+            "run-low": "9037",  # Original run: "110"
+            "karabo-id-control": "",
+            "karabo-id": "FXE_XAD_JF1M",
+            "karabo-da": ["JNGFR01", "JNGFR02"],
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "FXE_XAD_JF1M-CORRECT-BURST": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            # "/gpfs/exfel/exp/FXE/202131/p900226/raw"
+            "run": "9034", # Run cloned from run 106.
+            "sequences": "0,1",
+            "karabo-id-control": "",
+            "karabo-id": "FXE_XAD_JF1M",
+            "gain-mode": 0,
+            "karabo-da": ["JNGFR01", "JNGFR02"],
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+#     "FXE_XAD_JF1M-DARK-BURST_LONGRUN": {
+#         "det_type": "JUNGFRAU",
+#         "cal_type": "DARK",
+#         "config": {
+#             "out-folder": "{}/{}/{}",
+#             "in-folder": "/gpfs/exfel/exp/FXE/202321/p004576/raw",
+#             "run-high": "112",
+#             "run-med": "113",
+#             "run-low": "118",
+#             "karabo-id-control": "",
+#             "karabo-id": "FXE_XAD_JF1M",
+#             "karabo-da": ["JNGFR01", "JNGFR02"],
+#         },
+#         "reference-folder": "{}/{}/{}",
+#     },
+    "FXE_XAD_JF1M-CORRECT-SINGLE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/FXE/202101/p002478/raw/"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9030",  # Original from: 52
+            "sequences": "1,30,49",
+            "karabo-id-control": "",
+            "karabo-id": "FXE_XAD_JF1M",
+            "karabo-da": ["JNGFR01", "JNGFR02"],
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "FXE_XAD_JF1M-DARK-SINGLE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/FXE/202101/p002478/raw/"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9031",  # Original run: "47"
+            "run-med": "9032",  # Original run: "48"
+            "run-low": "9033",  # Original run: "49"
+            "karabo-id-control": "",
+            "karabo-id": "FXE_XAD_JF1M",
+            "karabo-da": ["JNGFR01", "JNGFR02"],
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SPB_IRDA_JF4M-CORRECT-SINGLE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9007",
+            "karabo-id-control": "",
+            "karabo-id": "SPB_IRDA_JF4M",
+            "karabo-da": [
+                "JNGFR01",
+                "JNGFR02",
+                "JNGFR03",
+                "JNGFR04",
+                "JNGFR05",
+                "JNGFR06",
+                "JNGFR07",
+                "JNGFR08",
+            ],
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SPB_IRDA_JF4M-DARK-ADAPTIVE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # /gpfs/exfel/exp/SPB/202130/p900204/raw
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9004",  # 88
+            "run-med": "9005",  # 89
+            "run-low": "9006",  # 90
+            "karabo-id-control": "",
+            "karabo-id": "SPB_IRDA_JF4M",
+            "karabo-da": [
+                "JNGFR01",
+                "JNGFR02",
+                "JNGFR03",
+                "JNGFR04",
+                "JNGFR05",
+                "JNGFR06",
+                "JNGFR07",
+                "JNGFR08",
+            ],
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    # MISSING FROM REFERENCE RUNS.
+    # "SPB_IRDA_JF4M-DARK-FIXED":
+    # {
+    #     "det_type": "JUNGFRAU",
+    #     "cal_type": "DARK",
+    #     "config":
+    #         {
+    #         "out-folder": "{}/{}/{}",
+    #         "in-folder": "/gpfs/exfel/exp/SPB/202202/p003051/raw",
+    #         "run-high": "166",
+    #         "run-med": "168",
+    #         "run-low": "169",
+    #         "karabo-id-control": "",
+    #         "karabo-id": "SPB_IRDA_JF4M",
+    #         "karabo-da": [
+    #             "JNGFR01", "JNGFR02", "JNGFR03", "JNGFR04",
+    #             "JNGFR05", "JNGFR06", "JNGFR07", "JNGFR08"
+    #         ],
+    #         },
+    #     "reference-folder": "{}/{}/{}",
+    # },
+    "HED_IA1_JF500K1-DARK-ADAPTIVE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202102/p002656/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9039",  # Original run: "219",
+            "run-med": "9040",  # Original run: "220",
+            "run-low": "9041",  # Original run: "221",
+            "karabo-id": "HED_IA1_JF500K1",
+            "karabo-da": "JNGFR01",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "HED_IA1_JF500K2-DARK-ADAPTIVE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202102/p002656/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            # The 3 runs are arranged in a wrong way on purpose.
+            "run-high": "9040",  # Original run: "219",
+            "run-med": "9041",  # Original run: "220",
+            "run-low": "9039",  # Original run: "221",
+            "karabo-id": "HED_IA1_JF500K2",
+            "karabo-da": "JNGFR02",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "HED_IA1_JF500K3-DARK-ADAPTIVE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202102/p002656/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9039",  # Original run: "219",
+            "run-med": "9040",  # Original run: "220",
+            "run-low": "9041",  # Original run: "221",
+            "karabo-id": "HED_IA1_JF500K3",
+            "karabo-da": "JNGFR03",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+#    "HED_IA1_JF500K1-DARK-FIXED":
+#     {
+#         "det_type": "JUNGFRAU",
+#         "cal_type": "DARK",
+#         "config":
+#             {
+#             "out-folder": "{}/{}/{}",
+#             "in-folder": "/gpfs/exfel/exp/HED/202131/p900227/raw",
+#             "run-high": "65",
+#             "run-med": "66",
+#             "run-low": "67",
+#             "karabo-id": "HED_IA1_JF500K1",
+#             "karabo-da": "JNGFR01",
+#             },
+#         "reference-folder": "{}/{}/{}",
+#     },
+    "HED_IA1_JF500K1-CORRECT-ADAPTIVE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202102/p002656/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9038",  # Original run: "230"
+            "sequences": "0,2,4",
+            "karabo-id": "HED_IA1_JF500K1",
+            "karabo-da": "JNGFR01",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "HED_IA1_JF500K2-CORRECT-ADAPTIVE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202102/p002656/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9038",  # Original run: "230"
+            "sequences": "0",
+            "karabo-id": "HED_IA1_JF500K2",
+            "karabo-da": "JNGFR02",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "HED_IA1_JF500K3-CORRECT-ADAPTIVE": {
+        "det_type": "JUNGFRAU",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202102/p002656/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9038",  # Original run: "230"
+            "sequences": "1,3",
+            "karabo-id": "HED_IA1_JF500K3",
+            "karabo-da": "JNGFR03",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "HED_IA1_EPX100-1-DARK": {
+        "det_type": "EPIX100",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202102/p002656/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9047",  # Original run: "219",
+            "karabo-id": "HED_IA1_EPX100-1",
+            "karabo-da": "EPIX01",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "MID_EXP_EPIX-1-CORRECT": {
+        "det_type": "EPIX100",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # Original proposal 2936
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9042",  # Original run: "160"
+            "karabo-id": "MID_EXP_EPIX-1",
+            "karabo-da": "EPIX01",
+            "sequences": "1,2,3",
+            "fix-temperature": 290,
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "HED_IA1_EPX100-1-CORRECT": {
+        "det_type": "EPIX100",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/HED/202102/p002656/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9038",  # Original run: "230"
+            "karabo-id": "HED_IA1_EPX100-1",
+            "karabo-da": "EPIX01",
+            "sequences": "1,2,3",
+            "fix-temperature": 290,
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "MID_EXP_EPIX-2-DARK": {
+        "det_type": "EPIX100",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # Original proposal: 2655
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9045",  # Original run: 224
+            "karabo-id": "MID_EXP_EPIX-2",
+            "karabo-da": "EPIX02",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SQS_NQS_PNCCD1MP-CORRECT": {
+        "det_type": "PNCCD",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SQS/202031/p900166/raw",
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9001",  # Original run: "347",
+            "karabo-id": "SQS_NQS_PNCCD1MP",
+            "karabo-da": "PNCCD01",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SQS_NQS_PNCCD1MP-DARK": {
+        "det_type": "PNCCD",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SQS/202031/p900166/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9000",  # Original run: "345",
+            "karabo-id": "SQS_NQS_PNCCD1MP",
+            "karabo-da": "PNCCD01",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "FXE_DET_LPD1M-1-DARK": {
+        "det_type": "LPD",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/FXE/202131/p900226/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run-high": "9008",  # Original run: "145",
+            "run-med": "9009",  # Original run: "146",
+            "run-low": "9010",  # Original run: "147",
+            "karabo-id": "FXE_DET_LPD1M-1",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "FXE_DET_LPD1M-1-CORRECT": {  # THIS IS A DARK RUN.
+        "det_type": "LPD",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/FXE/202131/p900226/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9008",  # Original run: "145",
+            "sequences": "0",
+            "karabo-id": "FXE_DET_LPD1M-1",
+            "slurm-mem": "750",
+            "num-workers": 4,
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SCS_DET_DSSC1M-1-DARK": {
+        "det_type": "DSSC",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SCS/202122/p002937/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9028",  # Original run: "1723",
+            "karabo-id": "SCS_DET_DSSC1M-1",
+            "slow-data-path": "SCS_CDIDET_DSSC/FPGA/PPT_Q",
+            "slow-data-aggregators": [1, 2, 3, 4]
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SCS_DET_DSSC1M-1-CORRECT": {
+        "det_type": "DSSC",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SCS/202122/p002937/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9028",  # Original run: "1723",
+            "karabo-id": "SCS_DET_DSSC1M-1",
+            "slow-data-path": "SCS_CDIDET_DSSC/FPGA/PPT_Q",
+            "slow-data-aggregators": [1, 2, 3, 4]
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SQS_DET_DSSC1M-1-DARK": {
+        "det_type": "DSSC",
+        "cal_type": "DARK",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SQS/202131/p900210/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9029",  # Original run: "437",
+            "karabo-id": "SQS_DET_DSSC1M-1",
+            "slow-data-path": "SQS_NQS_DSSC/FPGA/PPT_Q",
+            "slow-data-aggregators":
+                - 1
+                - 2
+                - 3
+                - 4
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    "SQS_DET_DSSC1M-1-CORRECT": {
+        "det_type": "DSSC",
+        "cal_type": "CORRECT",
+        "config": {
+            "out-folder": "{}/{}/{}",
+            # "/gpfs/exfel/exp/SQS/202131/p900210/raw"
+            "in-folder": "/gpfs/exfel/exp/CALLAB/202130/p900203/raw",
+            "run": "9029",  # Original run: "437",
+            "karabo-id": "SQS_DET_DSSC1M-1",
+            "slow-data-path": "SQS_NQS_DSSC/FPGA/PPT_Q",
+        },
+        "reference-folder": "{}/{}/{}",
+    },
+    # "FXE_XAD_G2XES-DARK": {
+    #     "det_type": "Gotthard2",
+    #     "cal_type": "DARK",
+    #     "config": {
+    #         "out-folder": "{}/{}/{}",
+    #         "in-folder": "/gpfs/exfel/exp/FXE/202231/p900298/raw",
+    #         "karabo-da": "GH201",
+    #         "run-high": "7",
+    #         "run-med": "8",
+    #         "run-low": "9",
+    #         "karabo-id": "FXE_XAD_G2XES",
+    #     },
+    #     "reference-folder": "{}/{}/{}",
+    # },
+    # "FXE_XAD_G2XES-CORRECT": {
+    #     "det_type": "Gotthard2",
+    #     "cal_type": "CORRECT",
+    #     "config": {
+    #         "out-folder": "{}/{}/{}",
+    #         "in-folder": "/gpfs/exfel/exp/FXE/202231/p900298/raw",
+    #         "karabo-da": "GH201",
+    #         "run": "7",
+    #         "karabo-id": "FXE_XAD_G2XES",
+    #         "no-offset-correction": True,
+    #     },
+    #     "reference-folder": "{}/{}/{}",
+    # },
+    # "SPB_50UM_GH2-CORRECT": {
+    #     "det_type": "Gotthard2",
+    #     "cal_type": "CORRECT",
+    #     "config": {
+    #         "out-folder": "{}/{}/{}",
+    #         "in-folder": "/gpfs/exfel/exp/SPB/202321/p004577/raw",
+    #         "karabo-da": "GH200",
+    #         "run": "98",
+    #         "sequences": "0,3,6",
+    #         "karabo-id": "SPB_50UM_GH2",
+    #     },
+    #     "reference-folder": "{}/{}/{}",
+    # },
+}
diff --git a/tests/test_reference_runs/conftest.py b/tests/test_reference_runs/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2c50991ea1806d3454a79a8169bc1f324cef2ec
--- /dev/null
+++ b/tests/test_reference_runs/conftest.py
@@ -0,0 +1,97 @@
+import pytest
+
+
+def pytest_addoption(parser):
+
+    parser.addoption(
+        "--release-test",
+        action="store_true",
+        default=False,
+        help="Start release test for all supported calibration notebooks",
+    )
+
+    parser.addoption(
+        "--detectors",
+        default=["all"],
+        nargs="+",
+    )
+
+    parser.addoption(
+        "--calibration",
+        type=str.lower,
+        choices=["correct", "dark", "all"],
+        default="all",
+    )
+
+    parser.addoption(
+        "--no-numerical-validation",
+        action="store_true",
+        default=False,
+        help="Skips tests for numerical validation for produced h5files.",
+    )
+
+    parser.addoption(
+        "--validation-only",
+        action="store_true",
+        default=False,
+        help=("Skips running xfel-calibrate CLI and "
+              "apply validation test on numerical data only."),
+    )
+
+    parser.addoption(
+        "--use-slurm",
+        action="store_true",
+        default=False,
+        help=("Run xfel-calibrate CLI without "
+              "--no-cluster-job option and use Slurm"),
+    )
+
+    parser.addoption(
+        "--picked-test",
+        type=str,
+        default=None,
+    )
+
+    parser.addoption(
+        "--out-folder",
+        type=str,
+    )
+
+    parser.addoption(
+        "--reference-folder",
+        type=str,
+    )
+
+
+@pytest.fixture
+def release_test_config(request):
+    detectors = request.config.getoption("--detectors")
+    calibration = request.config.getoption("--calibration")
+    skip_numerical_validation = request.config.getoption(
+        "--no-numerical-validation")
+    validate_only = request.config.getoption(
+        "--validation-only")
+    use_slurm = request.config.getoption(
+        "--use-slurm")
+    picked_test = request.config.getoption("--picked-test")
+    reference_folder = request.config.getoption("--reference-folder")
+    out_folder = request.config.getoption("--out-folder")
+    return (
+        detectors, calibration, picked_test,
+        skip_numerical_validation, validate_only,
+        use_slurm, reference_folder, out_folder,
+    )
+
+
+def pytest_configure(config):
+    config.addinivalue_line(
+        "markers",
+        "manual_run(): marks skips for tests that required to be run manually",
+    )
+
+
+def pytest_runtest_setup(item):
+    if list(item.iter_markers(name="manual_run")) and not item.config.getoption(
+        "--release-test"
+    ):
+        pytest.skip("Test initialized manually")
diff --git a/tests/test_reference_runs/test_pre_deployment.py b/tests/test_reference_runs/test_pre_deployment.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdd8a0f015baca42d15d9f3aa123ff992786fbdb
--- /dev/null
+++ b/tests/test_reference_runs/test_pre_deployment.py
@@ -0,0 +1,367 @@
+import io
+import logging
+import multiprocessing
+import pathlib
+import time
+from contextlib import redirect_stdout
+from dataclasses import dataclass
+from datetime import datetime
+from functools import partial
+from subprocess import PIPE, run
+from typing import Any, Dict, List, Tuple
+
+import h5py
+import numpy as np
+import pytest
+
+import xfel_calibrate.calibrate as calibrate
+
+from .callab_tests import automated_test_config
+
+LOGGER = logging.getLogger(__name__)
+
+
+@dataclass
+class ComparisonResult:
+    filename: str
+    new_dsets: list
+    missing_dsets: list
+    changed_dsets: list
+
+    def found_differences(self):
+        return bool(self.new_dsets or self.missing_dsets or self.changed_dsets)
+
+    def show(self):
+        if not self.found_differences():
+            print(f"{self.filename} - ✓ no changes")
+            return
+
+        print(self.filename)
+        for ds in self.new_dsets:
+            print(f"  + NEW: {ds}")
+        for ds in self.missing_dsets:
+            print(f"  - MISSING: {ds}")
+        for ds, detail in self.changed_dsets:
+            print(f"  ~ CHANGED: {ds} ({detail})")
+
+
+def gather_dsets(f: h5py.File):
+    res = set()
+
+    def visitor(name, obj):
+        if isinstance(obj, h5py.Dataset):
+            res.add(name)
+
+    f.visititems(visitor)
+    return res
+
+
+def iter_sized_chunks(ds: h5py.Dataset, chunk_size: int):
+    """Make slices of the dataset along the first axis
+
+    Aims for roughly chunk_size bytes per chunk."""
+    if ds.ndim == 0:  # Scalar
+        yield ()
+        return
+
+    row_bytes = ds.dtype.itemsize * int(np.prod(ds.shape[1:]))
+    chunk_l = max(chunk_size // row_bytes, 1)
+    for start in range(0, ds.shape[0], chunk_l):
+        yield slice(start, start + chunk_l)
+
+
+def validate_file(
+    ref_folder: pathlib.PosixPath,
+    out_folder: pathlib.PosixPath,
+    exclude_dsets: set,
+    test_file: str,
+) -> ComparisonResult:
+    ref_file = ref_folder / test_file
+    out_file = out_folder / test_file
+    with h5py.File(ref_file) as fref, h5py.File(out_file) as fout:
+        ref_dsets = gather_dsets(fref)
+        out_dsets = gather_dsets(fout)
+        changed = []
+        for dsname in sorted((ref_dsets & out_dsets) - exclude_dsets):
+            ref_ds = fref[dsname]
+            out_ds = fout[dsname]
+            if out_ds.shape != ref_ds.shape:
+                changed.append((
+                    dsname, f"Shape: {ref_ds.shape} -> {out_ds.shape}"
+                ))
+            elif out_ds.dtype != ref_ds.dtype:
+                changed.append((
+                    dsname, f"Dtype: {ref_ds.dtype} -> {out_ds.dtype}"
+                ))
+            else:
+                floaty = np.issubdtype(ref_ds.dtype, np.floating) \
+                        or np.issubdtype(ref_ds.dtype, np.complexfloating)
+
+                # Compare data incrementally rather than loading it all at once;
+                # read in blocks of ~64 MB (arbitrary limit) along first axis.
+                for chunk_slice in iter_sized_chunks(ref_ds, 64 * 1024 * 1024):
+                    ref_chunk = ref_ds[chunk_slice]
+                    out_chunk = out_ds[chunk_slice]
+                    if floaty:
+                        eq = np.allclose(ref_chunk, out_chunk, equal_nan=True)
+                    else:
+                        eq = np.array_equal(ref_chunk, out_chunk)
+                    if not eq:
+                        # If just 1 entry, show the values
+                        if ref_ds.size == 1:
+                            r, o = np.squeeze(ref_chunk), np.squeeze(out_chunk)
+                            changed.append((dsname, f"Value: {r} -> {o}"))
+                        else:
+                            changed.append((dsname, "Data changed"))
+                        break
+
+    return ComparisonResult(
+        test_file,
+        new_dsets=sorted(out_dsets - ref_dsets),
+        missing_dsets=sorted(ref_dsets - out_dsets),
+        changed_dsets=changed,
+    )
+
+
+def parse_config(
+        cmd: List[str], config: Dict[str, Any], out_folder: str
+) -> List[str]:
+    """Convert a dictionary to a list of arguments.
+
+    Values that are not strings will be cast.
+    Lists will be converted to several strings following their `--key`
+    flag.
+    Booleans will be converted to a `--key` flag, where `key` is the
+    dictionary key.
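+
+    A hypothetical example:
+
+    >>> parse_config(["xfel-calibrate"], {"karabo-id": "SPB_DET", "apply": True},
+    ...              "/out")
+    ['xfel-calibrate', '--karabo-id', 'SPB_DET', '--apply']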
+    """
+
+    for key, value in config.items():
+        if " " in key or (isinstance(value, str) and " " in value):
+            raise ValueError("Spaces are not allowed", key, value)
+
+        if isinstance(value, list):
+            cmd.append(f"--{key}")
+            cmd += [str(v) for v in value]
+        elif isinstance(value, bool):
+            if value:
+                cmd += ["--{}".format(key)]
+        else:
+            if value in ['""', "''"]:
+                value = ""
+            if key == "out-folder":
+                value = out_folder
+            cmd += ["--{}".format(key), str(value)]
+    return cmd
+
+
+def validate_hdf5_files(
+    out_folder: pathlib.Path,
+    reference_folder: pathlib.Path,
+    cal_type: str,
+):
+    """Apply HDF5 data validation.
+
+    Args:
+        out_folder (pathlib.Path): The output folder for the tested data.
+        reference_folder (pathlib.Path): The reference folder holding
+          the data to validate against.
+        cal_type (str): The type of calibration processing,
+          e.g. dark or correct.
+    """
+    print("\n--- Compare HDF5 files  ----")
+    print("REF:", reference_folder)
+    print("NEW:", out_folder)
+    ok = True
+
+    result_h5files = {p.name for p in out_folder.glob("*.h5")}
+    ref_h5files = {p.name for p in reference_folder.glob("*.h5")}
+    missing_files = ref_h5files - result_h5files
+    if missing_files:
+        print("Files missing from result (*.h5):", ", ".join(missing_files))
+        ok = False
+    new_files = result_h5files - ref_h5files
+    if new_files:
+        print("New files in result (*.h5):", ", ".join(new_files))
+        ok = False
+
+    files_to_check = sorted(result_h5files & ref_h5files)
+
+    # Hard-coded datasets to exclude from numerical validation.
+    # These datasets are known to change on every processing run.
+    if cal_type.lower() == "correct":
+        exclude_attrs = {"METADATA/creationDate", "METADATA/updateDate"}
+    else:
+        exclude_attrs = {"report"}
+
+    _validate_file = partial(
+        validate_file,
+        reference_folder,
+        out_folder,
+        exclude_attrs,
+    )
+    with multiprocessing.Pool(processes=8) as pool:
+        for comparison in pool.imap(_validate_file, files_to_check):
+            comparison.show()
+            if comparison.found_differences():
+                ok = False
+
+    return ok
+
+
+def slurm_watcher(test_key: str, std_out: str):
+    """
+    Watch the submitted Slurm jobs and wait for them to finish.
+    Once they have finished, assert that all of them reached the
+    `COMPLETED` state.
+
+    Args:
+        test_key (str): Test name.
+        std_out (str): xfel-calibrate CLI standard output.
+    """
+    jobs_active = True
+
+    LOGGER.info(f"{test_key} - xfel-calibrate std out: {std_out}")
+
+    jobids = None
+    for r in std_out.split("\n"):
+        if "Submitted the following SLURM jobs:" in r:
+            _, jobids = r.split(":")
+    assert jobids is not None, f"{test_key} failure, no submitted Slurm jobs found."
+
+    # Sleep a moment per job to let Slurm register the submissions.
+    time.sleep(len(jobids.split(",")))
+    jobids = jobids.strip()
+    while jobs_active:
+        cmd = ["sacct", "-j", jobids, "--format=state"]
+
+        res = run(cmd, stdout=PIPE)
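+        # sacct prints a two-line header ("State", "----------"); splitting
+        # on newlines also leaves a trailing empty string, hence [2:-1].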
+        states = res.stdout.decode().split("\n")[2:-1]
+
+        if not any(
+            s.strip()
+            in [
+                "COMPLETING",
+                "RUNNING",
+                "CONFIGURING",
+                "PENDING",
+            ]
+            for s in states
+        ):
+            jobs_active = False
+        else:
+            time.sleep(2)
+
+    # First check: all jobs must have ended in the COMPLETED state.
+    assert all(
+        s.strip() == "COMPLETED" for s in states
+    ), f"{test_key} failure, calibration jobs were not completed. {jobids}: {states}"  # noqa
+    LOGGER.info(f"{test_key}'s jobs were COMPLETED")
+
+
+@pytest.mark.manual_run
+@pytest.mark.parametrize(
+    "test_key, val_dict",
+    list(automated_test_config.items()),
+    ids=list(automated_test_config.keys()),
+)
+def test_xfel_calibrate(
+        test_key: str, val_dict: dict, release_test_config: Tuple
+):
+    """Test xfel calibrate detectors and calibrations written
+    in the given callab_test YAML file.
+    Args:
+        test_key : Key for the xfel-calibrate test.
+        val_dict: Dictionary of the configurations for the running test.
+        release_test_config: Tuple of booleans to pick or skip tests
+            based on the given boolean configs.
+    """
+
+    (
+        detectors,
+        calibration,
+        picked_test,
+        skip_numerical_validation,
+        only_validate,
+        use_slurm,
+        reference_dir_base,
+        out_dir_base,
+    ) = release_test_config
+
+    cal_type = val_dict["cal_type"]
+    det_type = val_dict["det_type"]
+
+    if not picked_test:
+        # Skip non-selected detectors
+        if detectors != ["all"] and det_type.lower() not in [
+            d.lower() for d in detectors
+        ]:
+            pytest.skip()
+
+        # Skip non-selected calibration
+        if calibration != "all" and cal_type.lower() != calibration:
+            pytest.skip()
+    else:
+        if test_key != picked_test:
+            pytest.skip()
+
+    cmd = ["xfel-calibrate", det_type, cal_type]
+
+    cal_conf = val_dict["config"]
+
+    out_folder = pathlib.Path(cal_conf["out-folder"].format(
+        out_dir_base, cal_conf["karabo-id"], test_key
+    ))
+    reference_folder = pathlib.Path(
+        val_dict["reference-folder"].format(
+            reference_dir_base, cal_conf["karabo-id"], test_key
+        )
+    )
+
+    report_name = out_folder / f"{test_key}_{datetime.now():%y%m%d_%H%M%S}"
+
+    cal_conf["report-to"] = str(report_name)
+
+    cmd = parse_config(cmd, cal_conf, out_folder)
+
+    if only_validate:
+        assert validate_hdf5_files(
+            out_folder, reference_folder, cal_type
+        ), "HDF5 files changed - see details above"
+        return
+
+    if not use_slurm:  # e.g. for Gitlab CI.
+        cmd += ["--no-cluster-job"]
+
+    cmd += [
+        "--slurm-name",
+        test_key,
+        "--cal-db-interface",
+        "tcp://max-exfl-cal001:8015#8045",
+    ]
+    f = io.StringIO()
+    LOGGER.info(f"Submitting CL: {cmd}")
+    with redirect_stdout(f):
+        errors = calibrate.run(cmd)
+        out_str = f.getvalue()
+
+    if use_slurm:
+        slurm_watcher(test_key, out_str)
+    else:
+        # confirm that all jobs succeeded.
+        assert errors == 0
+
+    time_to_wait = 5
+    time_counter = 0
+    # 2nd check for report availability. report_name is already an absolute
+    # path under out_folder, so only the suffix needs appending.
+    report_file = pathlib.Path(f"{report_name}.pdf")
+    while not report_file.exists():
+        time.sleep(1)
+        time_counter += 1
+        if time_counter > time_to_wait:
+            assert False, f"{test_key} failure, report doesn't exist."
+    LOGGER.info("Report found.")
+
+    # Numerical validation of the output files, unless explicitly skipped.
+    if not skip_numerical_validation:
+        assert validate_hdf5_files(
+            out_folder, reference_folder, cal_type
+        ), "HDF5 files changed - see details above"
diff --git a/tests/test_update_config.py b/tests/test_update_config.py
index 7a4c79965051b5a31847ca9c02875fd08eaf1252..910af2e70c95f468e6918e0fc8d004359018b80a 100644
--- a/tests/test_update_config.py
+++ b/tests/test_update_config.py
@@ -9,6 +9,7 @@ import yaml
 import zmq
 
 from webservice.update_config import (
+    _find_cycle,
     _add_available_configs_to_arg_parser,
     _create_new_config_from_args_input,
     main,
@@ -40,7 +41,7 @@ def test_main_sys_exit(capsys):
         with pytest.raises(SystemExit):
             main()
         out, _ = capsys.readouterr()
-    assert out == "Need to define all fields\n"
+    assert out == "Need to define all required fields\n"
 
 
 EXPECTED_ZMQ_REQ = [
@@ -70,6 +71,7 @@ def test_main(capsys):
             "--rel-gain", "true",
             "--webservice-address", "inproc://socket",
             "--correct",
+            "--verbose"
         ],
     ):
         with patch("zmq.Context", return_value=context):
@@ -84,13 +86,15 @@ def test_main(capsys):
 
 EXPECTED_CONF = [
     {
+        'common-mode': {'type': bool},
         'force-hg-if-below': {'type': int},
         'rel-gain': {'type': bool},
         'xray-gain': {'type': bool},
         'blc-noise': {'type': bool},
         'blc-set-min': {'type': bool},
-        'dont-zero-nans': {'type': bool},
-        'dont-zero-orange': {'type': bool},
+        'blc-stripes': {'type': bool},
+        'zero-nans': {'type': bool},
+        'zero-orange': {'type': bool},
         'max-pulses': {'type': list,
                        'msg': 'Range list of maximum pulse indices '
                               '(--max-pulses start end step). '
@@ -98,12 +102,14 @@ EXPECTED_CONF = [
         'use-litframe-finder': {'type': str},
         'litframe-device-id': {'type': str},
         'energy-threshold': {'type': int},
+        'no-common-mode': {'type': bool},
         'no-rel-gain': {'type': bool},
         'no-xray-gain': {'type': bool},
         'no-blc-noise': {'type': bool},
         'no-blc-set-min': {'type': bool},
-        'no-dont-zero-nans': {'type': bool},
-        'no-dont-zero-orange': {'type': bool}
+        'no-blc-stripes': {'type': bool},
+        'no-zero-nans': {'type': bool},
+        'no-zero-orange': {'type': bool}
     },
     {
         'karabo-da': {
@@ -126,22 +132,24 @@ args_1 = {
     "cycle": 000000,
     "correct": True,
     "apply": False,
-    "webservice_port": "tcp://max-exfl016:5555",
+    "webservice_port": "tcp://max-exfl-cal001:5555",
     "instrument": None,
     "force_hg_if_below": None,
     "rel_gain": True,
     "xray_gain": None,
     "blc_noise": None,
     "blc_set_min": None,
-    "dont_zero_nans": None,
-    "dont_zero_orange": None,
+    "blc_stripes": None,
+    "zero_nans": None,
+    "zero_orange": None,
     "max_pulses": None,
     "no_rel_gain": None,
     "no_xray_gain": None,
     "no_blc_noise": None,
     "no_blc_set_min": None,
-    "no_dont_zero_nans": None,
-    "no_dont_zero_orange": None,
+    "no_blc_stripes": None,
+    "no_zero_nans": None,
+    "no_zero_orange": None,
     "karabo_da": None,
 }
 
@@ -173,3 +181,19 @@ def test_create_new_config_from_args_input(instrument, args, expected):
         available_conf=EXPECTED_CONF,
     )
     assert new_conf == expected
+
+
+def test_find_cycle(tmp_path):
+    proposal_path = tmp_path / 'CALLAB' / '202301' / 'p002003'
+    proposal_path.mkdir(parents=True, exist_ok=True)
+
+    assert _find_cycle('2003', tmp_path) == '202301'
+    assert _find_cycle('002003', tmp_path) == '202301'
+
+    with pytest.raises(ValueError):
+        # Not existing proposal.
+        _find_cycle('2004', tmp_path)
+
+    with pytest.raises(ValueError):
+        # Not a number.
+        _find_cycle('p2004', tmp_path)
diff --git a/tests/test_xfel_calibrate/conftest.py b/tests/test_xfel_calibrate/conftest.py
index a98c410259385d6dc4a7d837b374331681635996..21161c99edb124c607acca3de6a60c10a4fda88e 100644
--- a/tests/test_xfel_calibrate/conftest.py
+++ b/tests/test_xfel_calibrate/conftest.py
@@ -215,7 +215,9 @@ class CalibrateCall:
         self.in_folder = in_folder
         self.out_folder = out_folder
 
-        self.args = [command, detector, cal_type, '--report-to', str(reports_dir)]
+        self.args = [
+            command, detector, cal_type, '--report-to', str(reports_dir / 'test.pdf')
+        ]
         if in_folder:
             self.args.extend(["--in-folder", str(self.in_folder)])
         if out_folder:
diff --git a/webservice/README.md b/webservice/README.md
index 1bced8a95363b6f49d8c60b6f338fb9723da4f3b..aecce1822ecaf50f7bce16139cc222cbd01ddd3e 100644
--- a/webservice/README.md
+++ b/webservice/README.md
@@ -159,11 +159,11 @@ to display a list of available options.
 Testing
 -------
 
-There is a test environment on ``max-exfl017``, separate from the production
+There is a test environment on ``max-exfl-cal002``, separate from the production
 instance.
 
 ```bash
-ssh xcaltst@max-exfl017.desy.de
+ssh xcaltst@max-exfl-cal002.desy.de
 cd /home/xcaltst/pycalibration
 
 # Get the code you want to test
@@ -189,3 +189,34 @@ status in myMdC should update as the processing occurs.
 
 The command ``squeue -u xcaltst`` will show running & pending Slurm jobs started
 by this test system.
+
+Manually Submitting Jobs
+------------------------
+
+A script `manual_launch.py` is provided to manually submit jobs to the service.
+
+```bash
+usage: manual_launch.py [-h] --proposal PROPOSAL [--delay DELAY] [--noconfirm] [--really] slices [slices ...]
+
+Manually submit calibration jobs.
+
+positional arguments:
+  slices               slices (or single numbers) of runs to process, inclusive range, starting at 1 (e.g. 1:3 parsed to {1, 2, 3}, 10 parsed to {10}, :10
+                       parsed to {1, 2, ..., 10})
+
+optional arguments:
+  -h, --help           show this help message and exit
+  --proposal PROPOSAL  proposal number
+  --delay DELAY        delay in seconds between submissions
+  --noconfirm          skip confirmation
+  --really             actually submit jobs instead of just printing them
+
+To run in the background use `nohup PYTHONUNBUFFERED=1 python manual_launch.py ... &` followed by `disown`.
+```
+
+Slices are inclusive, so `1:10` means runs 1 to 10, including both 1 and 10. A
+'slice' can also be a single number.
+
+For example, `python3 ./manual_launch.py 1 10:12 160:-1 --delay 60
+--proposal 2222 --really` submits runs 1, 10 to 12, and 160 onwards for
+calibration of proposal 2222, with a 60 second delay between submissions.
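+
+For a dry run, omit `--really`: the script then only prints the requests that
+would be sent, e.g.
+
+```bash
+python3 ./manual_launch.py 1 10:12 --proposal 2222 --noconfirm
+```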
diff --git a/webservice/check_run_status.py b/webservice/check_run_status.py
new file mode 100644
index 0000000000000000000000000000000000000000..c97668fa52002019889e61aadbbcfc4db2e3d9ba
--- /dev/null
+++ b/webservice/check_run_status.py
@@ -0,0 +1,36 @@
+"""Usage: check_run_status.py <proposal> <run>
+
+e.g. check_run_status.py 3279 168
+"""
+
+import sqlite3
+import sys
+
+from config import webservice as config
+
+proposal, run = sys.argv[1:3]
+proposal = proposal.zfill(6)
+run = int(run)
+
+conn = sqlite3.connect(config['web-service']['job-db'])
+
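+# The job DB is hierarchical: each request has one or more executions (one
+# per detector), and each execution owns a set of Slurm jobs.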
+req_cur = conn.execute("""
+    SELECT req_id, timestamp FROM requests
+    WHERE proposal = ? AND run = ? AND action = 'CORRECT'
+""", (proposal, run))
+
+for req_id, req_time in req_cur:
+    print(f"Request {req_id} at {req_time}")
+    for exec_id, karabo_id in conn.execute(
+        "SELECT exec_id, karabo_id FROM executions WHERE req_id = ?", (req_id,)
+    ):
+        print(f"- {karabo_id}")
+        jobs_by_status = {}
+        for job_id, status in conn.execute(
+            "SELECT job_id, status FROM slurm_jobs WHERE exec_id = ?", (exec_id,)
+        ):
+            jobs_by_status.setdefault(status, []).append(job_id)
+
+        for status, job_ids in sorted(jobs_by_status.items()):
+            print(f"  {status}:", *job_ids)
+    print()
diff --git a/webservice/config/serve_overview.yaml b/webservice/config/serve_overview.yaml
index 6dfc314c11787f5aae84eb3e97b9a0d9364900d0..2da8f8cbc71c7b0a5328ef7ba2ccce12c366b9c3 100644
--- a/webservice/config/serve_overview.yaml
+++ b/webservice/config/serve_overview.yaml
@@ -21,7 +21,7 @@ run-candidates:
 
 server-config:
     port: 8008
-    host: max-exfl016.desy.de
+    host: max-exfl-cal001.desy.de
     dark-timeout: 30
     n-calib: 10
 
diff --git a/webservice/config/webservice.yaml b/webservice/config/webservice.yaml
index 78b13b61402a3a00770a4b952f9bbcdc385bebfc..2ed4d41f9871a3f5ec44edce2687c3d1a69eaa03 100644
--- a/webservice/config/webservice.yaml
+++ b/webservice/config/webservice.yaml
@@ -52,7 +52,7 @@ correct:
     --slurm-name {action}_{instrument}_{detector}_{cycle}_p{proposal}_{runs}
     --report-to /gpfs/exfel/exp/{instrument}/{cycle}/p{proposal}/usr/Reports/{runs}/{det_instance}_{action}_{proposal}_{runs}_{time_stamp}
     --cal-db-timeout 300000
-    --cal-db-interface tcp://max-exfl016:8015#8044
+    --cal-db-interface tcp://max-exfl-cal001:8015#8044
 
 dark:
   in-folder: /gpfs/exfel/exp/{instrument}/{cycle}/p{proposal}/raw
@@ -67,5 +67,5 @@ dark:
     --request-time {request_time}
     --slurm-name {action}_{instrument}_{detector}_{cycle}_p{proposal}_{runs}
     --report-to /gpfs/exfel/d/cal/caldb_store/xfel/reports/{instrument}/{det_instance}/{action}/{action}_{proposal}_{runs}_{time_stamp}
-    --cal-db-interface tcp://max-exfl016:8015#8044
+    --cal-db-interface tcp://max-exfl-cal001:8015#8044
     --db-output
diff --git a/webservice/job_monitor.py b/webservice/job_monitor.py
index d0662a20351a9cca97797f919142d16dfc2b7ed2..cde702c69c71e2935027490b4073ffd0955e4951 100644
--- a/webservice/job_monitor.py
+++ b/webservice/job_monitor.py
@@ -15,14 +15,23 @@ from kafka.errors import KafkaError
 try:
     from .config import webservice as config
     from .messages import MDC, Errors, MigrationError, Success
-    from .webservice import init_job_db, init_md_client
+    from .webservice import init_job_db, init_md_client, time_db_transaction
 except ImportError:
     from config import webservice as config
     from messages import MDC, Errors, MigrationError, Success
-    from webservice import init_job_db, init_md_client
+    from webservice import init_job_db, init_md_client, time_db_transaction
 
 log = logging.getLogger(__name__)
 
+STATES_FINISHED = {  # https://slurm.schedmd.com/squeue.html#lbAG
+    'BOOT_FAIL', 'CANCELLED', 'COMPLETED', 'DEADLINE', 'FAILED',
+    'OUT_OF_MEMORY', 'SPECIAL_EXIT', 'TIMEOUT',
+}
+STATE_ABBREVS = {
+    'PENDING': 'PD',
+    'RUNNING': 'R',
+}
+
 
 class NoOpProducer:
     """Fills in for Kafka producer object when setting that up fails"""
@@ -50,21 +59,25 @@ def slurm_status(filter_user=True):
     :return: a dictionary indexed by slurm jobid and containing a tuple
              of (status, run time) as values.
     """
-    cmd = ["squeue"]
+    cmd = ["squeue", "--states=all", "--format=%i %T %M"]
     if filter_user:
         cmd += ["--me"]
-    res = run(cmd, stdout=PIPE)
+    res = run(cmd, stdout=PIPE, stderr=PIPE)
     if res.returncode == 0:
         rlines = res.stdout.decode().split("\n")
         statii = {}
         for r in rlines[1:]:
             try:
-                jobid, _, _, _, status, runtime, _, _ = r.split()
+                jobid, status, runtime = r.split()
                 jobid = jobid.strip()
                 statii[jobid] = status, runtime
             except ValueError:  # not enough values to unpack in split
                 pass
         return statii
+    else:
+        log.error("Running squeue failed. stdout: %r, stderr: %r",
+                  res.stdout.decode(), res.stderr.decode())
+        return None
 
 
 def slurm_job_status(jobid):
@@ -148,8 +161,12 @@ class JobsMonitor:
 
         Newly completed executions are present with an empty list.
         """
-        c = self.job_db.cursor()
-        c.execute("SELECT job_id, exec_id FROM slurm_jobs WHERE finished = 0")
+        jobs_to_check = self.job_db.execute(
+            "SELECT job_id, exec_id FROM slurm_jobs WHERE finished = 0"
+        ).fetchall()
+        if not jobs_to_check:
+            log.debug("No unfinished jobs to check")
+            return {}
 
         statii = slurm_status()
         # Check that slurm is giving proper feedback
@@ -158,27 +175,31 @@ class JobsMonitor:
         log.debug(f"SLURM info {statii}")
 
         ongoing_jobs_by_exn = {}
-        for r in c.fetchall():
+        updates = []
+        for r in jobs_to_check:
             log.debug(f"Job in DB before update: %s", tuple(r))
             execn_ongoing_jobs = ongoing_jobs_by_exn.setdefault(r['exec_id'], [])
 
             if str(r['job_id']) in statii:
                 # statii contains jobs which are still going (from squeue)
                 slstatus, runtime = statii[str(r['job_id'])]
-                finished = False
-                execn_ongoing_jobs.append(f"{slstatus}-{runtime}")
-
             else:
                 # These jobs have finished (successfully or otherwise)
                 _, runtime, slstatus = slurm_job_status(r['job_id'])
-                finished = True
 
-            c.execute(
+            finished = slstatus in STATES_FINISHED
+            if not finished:
+                short_state = STATE_ABBREVS.get(slstatus, slstatus)
+                execn_ongoing_jobs.append(f"{short_state}-{runtime}")
+
+            updates.append((finished, runtime, slstatus, r['job_id']))
+
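+        # Apply all collected status changes in one transaction rather than
+        # committing per job; time_db_transaction logs how long it takes.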
+        with time_db_transaction(self.job_db, 'Update jobs'):
+            self.job_db.executemany(
                 "UPDATE slurm_jobs SET finished=?, elapsed=?, status=? WHERE job_id = ?",
-                (finished, runtime, slstatus, r['job_id'])
+                updates
             )
 
-        self.job_db.commit()
         return ongoing_jobs_by_exn
 
     def process_request_still_going(self, req_id, running_jobs_info):
@@ -216,7 +237,7 @@ class JobsMonitor:
             "WHERE exec_id = ?",
             (exec_id,)
         ).fetchone()
-        with self.job_db:
+        with time_db_transaction(self.job_db, 'Update execution'):
             self.job_db.execute(
                 "UPDATE executions SET success = ? WHERE exec_id = ?",
                 (success, exec_id)
@@ -279,7 +300,12 @@ class JobsMonitor:
         log.debug("Update MDC for %s, %s: %s", r['action'], r['mymdc_id'], msg)
 
         if r['action'] == 'CORRECT':
-            status = 'A' if success else 'E'  # Available/Error
+            if success:
+                status = 'A'  # Available
+            elif set(krb_id_successes.values()) == {0, 1}:
+                status = 'AW'  # Available with Warning (failed for some detectors)
+            else:
+                status = 'E'  # Error
             self.mymdc_update_run(r['mymdc_id'], msg, status)
         else:  # r['action'] == 'DARK'
             status = 'F' if success else 'E'  # Finished/Error
diff --git a/webservice/manual_launch.py b/webservice/manual_launch.py
index 4753bbdafc2797cfc259355c92b8c1bf9598acf8..a5a53aa9f93ee8322231de145627923741b607b5 100644
--- a/webservice/manual_launch.py
+++ b/webservice/manual_launch.py
@@ -1,26 +1,294 @@
+from __future__ import annotations
+
+import argparse
+import datetime as dt
+import time
+from contextlib import contextmanager
+from pathlib import Path
+from typing import Generator, Optional
+
 import zmq
+from config import webservice as config
+from httpx import Client, Response
+from rich import print
+from rich.progress import (
+    MofNCompleteColumn,
+    Progress,
+    SpinnerColumn,
+    TextColumn,
+    TimeElapsedColumn,
+)
+from rich.prompt import Confirm
+
+parser = argparse.ArgumentParser(
+    description="Manually submit calibration jobs.",
+    epilog="""To run in the background use `nohup PYTHONUNBUFFERED=1 python
+    manual_launch.py ... &` followed by `disown`.""",
+)
+
+parser.add_argument(
+    "slices",
+    type=str,
+    nargs="+",
+    help="""slices (or single numbers) of runs to process, inclusive range, starting at
+    1 (e.g. 1:3 parsed to {1, 2, 3}, 10 parsed to {10}, :10 parsed to {1, 2, ...,
+    10})""",
+)
+parser.add_argument(
+    "--proposal",
+    type=int,
+    help="proposal number",
+    required=True,
+)
+parser.add_argument(
+    "--delay",
+    default=30,
+    type=int,
+    help="delay in seconds between submissions",
+    required=False,
+)
+parser.add_argument(
+    "--noconfirm",
+    action="store_true",
+    help="skip confirmation",
+)
+parser.add_argument(
+    "--really",
+    action="store_true",
+    help="actually submit jobs instead of just printing them",
+)
+
+
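+# In-process cache for the MyMdC OAuth token, so a new token is only
+# requested once the previous one has expired.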
+BEARER = {
+    "access_token": "",
+    "expires_at": dt.datetime.now(),
+}
+
+
+def pre_checks():
+    # Fail fast if we don't have the required configs set
+    required_keys = ["token-url", "user-id", "user-secret", "user-email"]
+    for k in required_keys:
+        if config["metadata-client"][k] is None:
+            print(
+                f"Missing key [bold red]`{k}`[/bold red] in metadata client configuration"
+            )
+            print("[bold red]Aborted[/bold red]")
+            exit(1)
+
+
+def get_bearer_token() -> str:
+    if BEARER["access_token"] and BEARER["expires_at"] > dt.datetime.now():
+        return BEARER["access_token"]
+
+    with Client() as client:
+        data = {
+            "grant_type": "client_credentials",
+            "client_id": config["metadata-client"]["user-id"],
+            "client_secret": config["metadata-client"]["user-secret"],
+        }
+
+        # With a real user the scope must be set to public
+        if not str(config["metadata-client"]["user-email"]).endswith("example.com"):
+            data["scope"] = "public"
+
+        response = client.post(f"{config['metadata-client']['token-url']}", data=data)
+
+    data = response.json()
+
+    if any(k not in data for k in ["access_token", "expires_in"]):
+        print(
+            "Response from MyMdC missing required fields, check webservice `user-id`"
+            f"and `user-secret`. Response: {data=}",
+        )
+        raise ValueError("Invalid response from MyMdC")
+
+    expires_in = dt.timedelta(seconds=data["expires_in"])
+
+    BEARER["access_token"] = data["access_token"]
+    BEARER["expires_at"] = dt.datetime.now() + expires_in
+
+    return BEARER["access_token"]
+
+
+@contextmanager
+def get_client() -> Generator[Client, None, None]:
+    bearer_token = get_bearer_token()
+
+    with Client() as client:
+        headers = {
+            "accept": "application/json; version=1",
+            "X-User-Email": config["metadata-client"]["user-email"],
+            "Authorization": f"Bearer {bearer_token}",
+        }
+
+        client.headers.update(headers)
+
+        yield client
+
+
+def _get_runs_by_proposal(number: int, client: Client, page: int = 1) -> Response:
+    return client.get(
+        f"{config['metadata-client']['base-api-url']}/runs/runs_by_proposal",
+        params={"proposal_number": number, "page": page},
+        timeout=10,
+    )
+
+
+def get_runs_by_proposal_all(number: int) -> list[dict]:
+    with get_client() as client:
+        res = _get_runs_by_proposal(number, client, 1)
+        if res.status_code != 200:
+            raise ValueError(res.url, res.text)
+        runs = res.json()
+        # MyMdC paginates the results; fetch any remaining pages as
+        # indicated by the x-total-pages response header.
+        for page in range(2, int(res.headers.get("x-total-pages", 1)) + 1):
+            page_res = _get_runs_by_proposal(number, client, page)
+            runs.extend(page_res.json())
+
+    return runs
+
+
+def main(
+    proposal_no: int,
+    slices: list[slice],
+    delay: int,
+    noconfirm: Optional[bool] = False,
+    really: Optional[bool] = False,
+):
+    with Progress(transient=True) as progress:
+        task_submission = progress.add_task(
+            "[yellow]Querying FS for proposal information", total=None
+        )
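+        # Proposal directories follow /gpfs/exfel/exp/<instrument>/<cycle>/p<proposal>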
+        exp = Path("/gpfs/exfel/exp")
+        proposal_paths = list(exp.glob(f"*/*/p{proposal_no:06d}"))
+        if len(proposal_paths) != 1:
+            raise ValueError(f"Proposal {proposal_no} not found")
+
+        proposal_path = proposal_paths[0]
+        instrument = proposal_path.parts[4]
+        cycle = proposal_path.parts[5]
+
+        progress.update(task_submission, description="[yellow]Querying MyMdC for runs")
+
+        all_runs = get_runs_by_proposal_all(proposal_no)
+
+    run_no_id_map = {run["run_number"]: run["id"] for run in all_runs}
+    max_run_no = max(run_no_id_map.keys())
+    # Slice stops were already shifted to be inclusive; pass max_run_no + 1
+    # so that the highest run number itself stays in range.
+    requested_ranges = [range(*s.indices(max_run_no + 1)) for s in slices]
+    requested_run_nos = {run_no for r in requested_ranges for run_no in r}
+
+    requests = dict(
+        sorted(
+            {
+                run_no: run_no_id_map[run_no]
+                for run_no in requested_run_nos
+                if run_no in run_no_id_map
+            }.items()
+        )
+    )
+
+    if missing_run_ids := set(requested_run_nos) - set(run_no_id_map.keys()):
+        print(
+            f"[bold red]Missing run IDs for run number(s) {missing_run_ids}[/bold red]"
+        )
+
+    if not really:
+        print("[yellow]`--really` flag missing, not submitting jobs")
+
+    if not noconfirm and not Confirm.ask(
+        f"Submit [red bold]{len(requests)}[/red bold] jobs for proposal "
+        f"[bold]{proposal_no}[/bold]?",
+        default=False,
+    ):
+        print("[bold red]Aborted[/bold red]")
+        exit(1)
+
+    with Progress(
+        SpinnerColumn(),
+        TextColumn("[progress.description]{task.description}"),
+        MofNCompleteColumn(),
+        TimeElapsedColumn(),
+    ) as progress:
+        description = f"[green]Submitted request for p{proposal_no:05d}/{{run_str}} "
+        task_submission = progress.add_task(
+            f"{description}r---[------]", total=len(requests)
+        )
+        con = zmq.Context()
+        socket = con.socket(zmq.REQ)
+        socket.connect("tcp://max-exfl-cal001:5555")
+
+        if not really:
+            #  Fake socket for testing, just logs what would have been sent via ZMQ
+            socket = lambda: None
+            socket.send = lambda x: progress.console.log(
+                f"mock `zmq.REQ` socket send: {x}"
+            )
+            socket.recv = lambda: "mock `zmq.REQ` socket response"
+
+        last_run_no = list(requests.keys())[-1]
+
+        for run_no, run_id in requests.items():
+            args = (
+                "correct",
+                str(run_id),
+                "_",
+                str(instrument),
+                str(cycle),
+                f"{proposal_no:06d}",
+                str(run_no),
+                "-",
+            )
+            msg = f"""['{"','".join(args)}']""".encode()
+            progress.console.log(args)
+            socket.send(msg)
+
+            progress.update(
+                task_submission,
+                advance=1,
+                description=description.format(
+                    run_str=f"[bold yellow]r{run_no:03d}[{run_id:06d}]"
+                ),
+            )
+
+            res = socket.recv()
+            progress.console.log(res)
+
+            if run_no != last_run_no:
+                progress.console.log(f"sleeping for {delay}s")
+                time.sleep(delay)
+            else:
+                progress.update(task_submission, description="[green]Done")
+
+
+if __name__ == "__main__":
+    args = vars(parser.parse_args())
+
+    slices = []
+    for s in args["slices"]:
+        slice_split = tuple(map(lambda x: int(x) if x else None, s.split(":")))
+        step = None
+        if len(slice_split) == 1:
+            start, stop = slice_split[0], slice_split[0]
+        elif len(slice_split) == 2:
+            start, stop = slice_split
+        else:
+            start, stop, step = slice_split
+
+        # Python slice indices are 0-based, but we want to be 1-based
+        if start is None or start == 0:
+            start = 1
+
+        # Shift the stop to make ranges inclusive; -1 means "up to the
+        # latest run", which an open-ended slice expresses directly.
+        if stop:
+            stop = stop + 1 if stop != -1 else None
+
+        slices.append(slice(start, stop, step))
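+    # e.g. "10" -> slice(10, 11), ":3" -> slice(1, 4), "160:-1" -> slice(160, None)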
+
+    pre_checks()
 
-con = zmq.Context()
-socket = con.socket(zmq.REQ)
-con = socket.connect("tcp://max-exfl017:5555")
-
-action = 'dark_request'
-dark_run_id = '258'
-sase = 'sase1'
-instrument = 'CALLAB'
-cycle = '202031'
-proposal = '900113'
-detector_id = 'SPB_DET_AGIPD1M-1'
-pdu_physical_names = '["AGIPD00 (Q1M1)"', '"AGIPD01 (Q1M2)"', '"AGIPD02 (Q1M3)"', '"AGIPD03 (Q1M4)"', '"AGIPD04 (Q2M1)"', '"AGIPD05 (Q2M2)"', '"AGIPD06 (Q2M3)"', '"AGIPD07 (Q2M4)"', '"AGIPD08 (Q3M1)"', '"AGIPD09 (Q3M2)"', '"AGIPD10 (Q3M3)"', '"AGIPD11 (Q3M4)"', '"AGIPD12 (Q4M1)"', '"AGIPD13 (Q4M2)"', '"AGIPD14 (Q4M3)"', '"AGIPD15 (Q4M4)"]'  # noqa
-pdu_karabo_das = '["AGIPD00"', ' "AGIPD01"', ' "AGIPD02"', ' "AGIPD03"', ' "AGIPD04"', ' "AGIPD05"', ' "AGIPD06"', ' "AGIPD07"', ' "AGIPD08"', ' "AGIPD09"', ' "AGIPD10"', ' "AGIPD11"', ' "AGIPD12"', ' "AGIPD13"', ' "AGIPD14"', ' "AGIPD15"]'  # noqa
-operation_mode = 'FIXED_GAIN'
-run_numbers = '[9985,]'
-
-
-data = [action, dark_run_id, sase, instrument, cycle, proposal, detector_id,
-        operation_mode, *pdu_physical_names, *pdu_karabo_das, run_numbers]
-stuff = [action, dark_run_id, sase, instrument, cycle, proposal, 'SPB_DET_AGIPD1M-1', 'ADAPTIVE_GAIN', '["AGIPD00 (Q1M1)"', '"AGIPD01 (Q1M2)"', '"AGIPD02 (Q1M3)"', '"AGIPD03 (Q1M4)"', '"AGIPD04 (Q2M1)"', '"AGIPD05 (Q2M2)"', '"AGIPD06 (Q2M3)"', '"AGIPD07 (Q2M4)"', '"AGIPD08 (Q3M1)"', '"AGIPD09 (Q3M2)"', '"AGIPD10 (Q3M3)"', '"AGIPD11 (Q3M4)"', '"AGIPD12 (Q4M1)"', '"AGIPD13 (Q4M2)"', '"AGIPD14 (Q4M3)"', '"AGIPD15 (Q4M4)"]', '["AGIPD00"', ' "AGIPD01"', ' "AGIPD02"', ' "AGIPD03"', ' "AGIPD04"', ' "AGIPD05"', ' "AGIPD06"', ' "AGIPD07"', ' "AGIPD08"', ' "AGIPD09"', ' "AGIPD10"', ' "AGIPD11"', ' "AGIPD12"', ' "AGIPD13"', ' "AGIPD14"', ' "AGIPD15"]', '[9992', ' 9991', ' 9990]']
-
-socket.send(str(stuff).encode())
-resp = socket.recv_multipart()[0]
-print(resp.decode())
+    main(
+        args["proposal"],
+        slices,
+        args["delay"],
+        args["noconfirm"],
+        args["really"],
+    )
diff --git a/webservice/messages.py b/webservice/messages.py
index 4928545df757c3e8560e51eb714de4aca2ebdfb2..48b17b367cc4465c365fa129edf260c7d80ee1bb 100644
--- a/webservice/messages.py
+++ b/webservice/messages.py
@@ -43,3 +43,4 @@ class Success:
     REPROD_QUEUED = "SUCCESS: Queued proposal {}, run {} for reproducing previous offline calibration"
     DONE_CORRECTION = "SUCCESS: Finished correction: proposal {}. run {}"
     DONE_CHAR = "SUCCESS: Finished dark characterization: proposal {}, run {}"
+    ALREADY_REQUESTED = "SUCCESS: Correction already queued/running for proposal {}, run {}"
diff --git a/webservice/request_darks.py b/webservice/request_darks.py
index b9847f1b411ac95ff016f21fc587f2411f3172c1..cbc739c1787e58783d13f63d0bf5ab2abc2fbd42 100644
--- a/webservice/request_darks.py
+++ b/webservice/request_darks.py
@@ -35,7 +35,7 @@ args = vars(parser.parse_args())
 
 con = zmq.Context()
 socket = con.socket(zmq.REQ)
-con = socket.connect("tcp://max-exfl016:5555")
+con = socket.connect("tcp://max-exfl-cal001:5555")
 
 uuid = str(datetime.now().timestamp().as_integer_ratio()[0])
 
diff --git a/webservice/request_repeat.py b/webservice/request_repeat.py
index 0851f99d5ca2b9164450318238fe65d431a56fb9..f05b96ee6e8a021796edc83a27a2bf441cc06726 100644
--- a/webservice/request_repeat.py
+++ b/webservice/request_repeat.py
@@ -13,8 +13,8 @@ parser = argparse.ArgumentParser(description='Request repeat correction.')
 parser.add_argument('proposal', type=int, help='The proposal number')
 parser.add_argument('run', type=int, help='The run number')
 parser.add_argument('--mymdc-id', type=int, default=0, help="Run ID in myMdC")
-parser.add_argument('--endpoint', default='tcp://max-exfl016:5555',
-                help="The ZMQ endpoint to connect to (max-exfl017 for testing)")
+parser.add_argument('--endpoint', default='tcp://max-exfl-cal001:5555',
+                help="The ZMQ endpoint to connect to (max-exfl-cal002 for testing)")
 
 args = parser.parse_args()
 
diff --git a/webservice/templates/last_characterizations.html b/webservice/templates/last_characterizations.html
index 1a9b893740aabbe1a7ffe80f26668a9e9276e18e..d874bff9d53767358c729154335d0f2e5c92d396 100644
--- a/webservice/templates/last_characterizations.html
+++ b/webservice/templates/last_characterizations.html
@@ -4,7 +4,12 @@
        <h3>{{ instrument }}</h3>
        <dl>
            <dt>Requested:</dt><dd>{{ data['requested'] }}</dd>
-           <dt>Check in DB:</dt><dd><a href="https://in.xfel.eu/calibration/admin/calibration_constant_version?model_name=calibration_constant_version&utf8=%E2%9C%93&f%5Bphysical_device%5D%5B02524%5D%5Bo%5D=like&f%5Bphysical_device%5D%5B02524%5D%5Bv%5D={{ data['device_type'] }}&f%5Bcalibration_constant%5D%5B02697%5D%5Bo%5D=like&f%5Bcalibration_constant%5D%5B02697%5D%5Bv%5D=Offset&query=" target="_blank"> Open in calDB </a></dd>
+           <dt>Check in DB:</dt><dd>
+           {% for pdf in data['pdfs'] %}
+                <a href="https://in.xfel.eu/calibration/reports/by_file/{{ pdf[1] }}" target="_blank">Open in calDB</a>
+           {% endfor %}
+           &nbsp;
+           </dd>
            <dt>Output path:</dt><dd>{{ data['out_path'] }}</dd>
            <dt>Input path:</dt><dd>{{ data['in_path'] }}</dd>
            <dt>Input runs:</dt><dd>{{ data['runs'] }}</dd>
diff --git a/webservice/update_config.py b/webservice/update_config.py
index f6a6f9d1f9bb7c7474724d955fd359f3f4fa2d5f..f1ae56d369ddbadf4e0d6ca2d158593b9fe47941 100755
--- a/webservice/update_config.py
+++ b/webservice/update_config.py
@@ -1,3 +1,7 @@
+#!/usr/bin/env python3
+
+
+from pathlib import Path
 import argparse
 import json
 import sys
@@ -9,13 +13,15 @@ import zmq
 AGIPD_CONFIGURATIONS = {
     "correct":
     {
+        "common-mode": {'type': bool},
         "force-hg-if-below": {'type': int},
         "rel-gain": {'type': bool},
         "xray-gain": {'type': bool},
         "blc-noise": {'type': bool},
         "blc-set-min": {'type': bool},
-        "dont-zero-nans": {'type': bool},
-        "dont-zero-orange": {'type': bool},
+        "blc-stripes": {'type': bool},
+        "zero-nans": {'type': bool},
+        "zero-orange": {'type': bool},
         "max-pulses": {'type': list,
                        'msg': "Range list of maximum pulse indices "
                               "(--max-pulses start end step). "
@@ -40,17 +46,56 @@ AGIPD_CONFIGURATIONS = {
     }
 }
 
-DATA_MAPPING = {
-        "karabo-da": {
-            'type': list,
-            'choices': [f"AGIPD{i:02d}" for i in range(16)],
-            'msg': "Choices: [AGIPD00 ... AGIPD15]. "
-        }
+REMI_CONFIGURATIONS = {
+    'correct':
+    {
+        'first-pulse-offset': {'type': int}
+    }
+}
+
+TIMEPIX_CONFIGURATIONS = {
+    'correct':
+    {
+        'max-num-centroids': {'type': int},
+        'clustering-epsilon': {'type': float},
+        'clustering-tof-scale': {'type': float},
+        'clustering-min-samples': {'type': int},
+        'clustering-n-jobs': {'type': int},
+        'threshold-tot': {'type': int},
+        'raw-timewalk-lut-filepath': {'type': str},
+        'centroiding-timewalk-lut-filepath': {'type': str}
+    }
+}
+
+AGIPD_DATA_MAPPING = {
+    "karabo-da": {
+        'type': list,
+        'choices': [f"AGIPD{i:02d}" for i in range(16)],
+        'msg': "Choices: [AGIPD00 ... AGIPD15]. "
+    }
+}
+
+REMI_DATA_MAPPING = {
+    "karabo-da": {
+        "type": list,
+        "choices": ["DIGI02"],
+        "msg": "Choices: [DIGI02]. "
+    }
+}
+
+TIMEPIX_DATA_MAPPING = {
+    "karabo-da": {
+        "type": list,
+        "choices": ["DA02"],
+        "msg": "Choices: [DA02]. "
+    }
 }
 
 AVAILABLE_DETECTORS = {
-    "SPB_DET_AGIPD1M-1": [AGIPD_CONFIGURATIONS, DATA_MAPPING],
-    "MID_DET_AGIPD1M-1": [AGIPD_CONFIGURATIONS, DATA_MAPPING],
+    "SPB_DET_AGIPD1M-1": [AGIPD_CONFIGURATIONS, AGIPD_DATA_MAPPING],
+    "MID_DET_AGIPD1M-1": [AGIPD_CONFIGURATIONS, AGIPD_DATA_MAPPING],
+    "SQS_REMI_DLD6": [REMI_CONFIGURATIONS, REMI_DATA_MAPPING],
+    "SQS_AQS_CAM": [TIMEPIX_CONFIGURATIONS, TIMEPIX_DATA_MAPPING]
 }
 
 
@@ -67,11 +112,12 @@ required_args = parser.add_argument_group('required arguments')
 
 required_args.add_argument(
     '--karabo-id', type=str,
-    choices=['SPB_DET_AGIPD1M-1', 'MID_DET_AGIPD1M-1'])
+    choices=list(AVAILABLE_DETECTORS.keys()))
 required_args.add_argument(
     '--proposal', type=str,
-    help='The proposal number, without leading p, but with leading zeros.')
-required_args.add_argument('--cycle', type=str, help='The facility cycle.')
+    help='The proposal number, without leading p, but with leading zeros.')
+required_args.add_argument('--cycle', type=str, help='The facility cycle, '
+                           'detected automatically if omitted')
 
 action_group = required_args.add_mutually_exclusive_group()
 action_group.add_argument(
@@ -79,13 +125,16 @@ action_group.add_argument(
 action_group.add_argument(
     '--dark', '-d', action='store_true')
 
+parser.add_argument(
+    '--verbose', '-v', action='store_true',
+    help='More verbose output, e.g. printing the entire configuration sent.')
 parser.add_argument(
     '--apply', action='store_true',
     help='Apply and push the requested configuration update to the git.')
 parser.add_argument(
     '--webservice-address',
     type=str,
-    default="tcp://max-exfl016:5555",
+    default="tcp://max-exfl-cal001:5555",
     help=('The port of the webservice to update '
           'calibration configurations repository.')
 )
@@ -97,6 +146,21 @@ parser.add_argument(
 )
 
 
+def _find_cycle(proposal: str, exp_root: Path = Path('/gpfs/exfel/exp')) -> str:
+    try:
+        proposal_no = int(proposal)
+    except ValueError:
+        raise ValueError('proposal number cannot be converted to a number')
+
+    # /gpfs/exfel/exp/<instrument>/<cycle>/p<proposal>/
+    proposal_path = next(exp_root.glob(f'*/*/p{proposal_no:06d}'), None)
+
+    if proposal_path is None:
+        raise ValueError('could not locate proposal on GPFS')
+
+    return proposal_path.parts[-2]
+
+
 def _add_available_configs_to_arg_parser(karabo_id: str, action: str):
     """Add the available configuration for the selected detector
     to the argument parser.
@@ -105,7 +169,7 @@ def _add_available_configs_to_arg_parser(karabo_id: str, action: str):
     along with the arguments.
     """
 
-    available_conf = [{}, DATA_MAPPING]
+    available_conf = [{}, AVAILABLE_DETECTORS[karabo_id][1]]
     # adding "no" bools to available configurations
 
     # Loop over action configurations in available_detectors dictionary.
@@ -175,7 +239,7 @@ def _create_new_config_from_args_input(
                     continue
 
                 # checking if data-mapping was updated.
-                if key in DATA_MAPPING.keys():
+                if key in AVAILABLE_DETECTORS[karabo_id][1].keys():
                     if 'data-mapping' not in new_conf.keys():
                         new_conf['data-mapping'] = {karabo_id: {key: {}}}
                     new_conf['data-mapping'][karabo_id][key] = value
@@ -219,13 +283,11 @@ def main():
 
     args = vars(parser.parse_args(argv))
 
-    if (
-        instrument is None or
-        proposal is None or
-        cycle is None
-    ):
-        print("Need to define all fields")
+    if instrument is None or proposal is None:
+        print("Need to define all required fields")
         sys.exit(1)
+    elif cycle is None:
+        cycle = _find_cycle(proposal)
 
     new_conf = _create_new_config_from_args_input(
         instrument=instrument,
@@ -241,7 +303,7 @@ def main():
         print("-" * 80)
 
     pyaml = yaml.dump(new_conf, default_flow_style=False)
-    print(f"Sending the following update:\n {pyaml}")
+    print(f"# Sending the following update:\n{pyaml}")
     print("-" * 80)
     con = zmq.Context()
     socket = con.socket(zmq.REQ)
@@ -251,15 +313,24 @@ def main():
         "SASEX",
         args["karabo_id"],
         instrument,
-        args["cycle"],
+        cycle,
         args["proposal"],
         json.dumps(new_conf),
         str(args["apply"]),
     ])
     socket.send(f"['{msg}']".encode())
     resp = socket.recv_multipart()[0]
-    print("Configuration now in place is:")
-    print(resp.decode())
+    print("# Configuration now in place is:")
+
+    if args['verbose']:
+        print(resp.decode())
+    else:
+        total_config = yaml.safe_load(resp.decode())
+        print(yaml.dump({
+            action: {instrument: {
+                karabo_id: total_config[action][instrument][karabo_id]
+            }}
+        }, default_flow_style=False))
 
 
 if __name__ == '__main__':
diff --git a/webservice/update_mdc_darks.py b/webservice/update_mdc_darks.py
new file mode 100644
index 0000000000000000000000000000000000000000..df03f8727510b81ff195e0e52210d58b8e0f1019
--- /dev/null
+++ b/webservice/update_mdc_darks.py
@@ -0,0 +1,46 @@
+import argparse
+from pathlib import Path
+
+from metadata_client.metadata_client import MetadataClient
+
+from .config import webservice as config
+
+parser = argparse.ArgumentParser(
+    description='Update run status at MDC for a given run id.')
+#  TODO: unify configuration argument names across the project
+parser.add_argument('--conf-file', type=str, help='Path to webservice config',
+                    default=None)
+parser.add_argument('--flg', type=str, choices=["IP", "F", "E"], required=True,
+                    help='Status flag for MDC: In Progress/Finished/Error.')  # noqa
+parser.add_argument('id', type=int, help='Dark run id from MDC')
+parser.add_argument('--msg', type=str, help='Message string to MDC',
+                    default='Error while job submission')
+parser.add_argument('--really', action='store_true',
+                    help="Actually make changes (otherwise dry-run)")
+args = parser.parse_args()
+
+if args.conf_file is not None:
+    config.configure(includes_for_dynaconf=[Path(args.conf_file).absolute()])
+
+mdconf = config['metadata-client']
+client_conn = MetadataClient(client_id=mdconf['user-id'],
+                             client_secret=mdconf['user-secret'],
+                             user_email=mdconf['user-email'],
+                             token_url=mdconf['token-url'],
+                             refresh_url=mdconf['refresh-url'],
+                             auth_url=mdconf['auth-url'],
+                             scope=mdconf['scope'],
+                             base_api_url=mdconf['base-api-url'])
+
+print(f"Updating dark run {args.id} to status {args.flg} at {mdconf['base-api-url']}")
+if args.really:
+    response = client_conn.update_dark_run_api(args.id, {
+        'dark_run': {'flg_status': args.flg, 'calcat_feedback': args.msg}
+    })
+
+    if response.status_code == 200:
+        print('Run is updated')
+    else:
+        print(f'Update failed {response}')
+else:
+    print("Add --really to actually make these changes")
diff --git a/webservice/webservice.py b/webservice/webservice.py
index 335cacdbed4826e034fc430c4019a98039b5e0bf..0652255f006d1e2d5c91f6eeb525d07f428e5be9 100644
--- a/webservice/webservice.py
+++ b/webservice/webservice.py
@@ -1,3 +1,5 @@
+import time
+
 import argparse
 import ast
 import asyncio
@@ -60,6 +62,7 @@ def init_job_db(config):
             action,
             timestamp
         );
+        CREATE INDEX IF NOT EXISTS req_by_run ON requests(proposal, run, action);
         CREATE TABLE IF NOT EXISTS executions(
             exec_id INTEGER PRIMARY KEY,
             req_id REFERENCES requests(req_id),
@@ -301,6 +304,33 @@ def parse_config(cmd: List[str], config: Dict[str, Any]) -> List[str]:
     return cmd
 
 
+class time_db_transaction:
+    """Record time taken to write to the database
+
+    Use as a context manager. When leaving the block, the transaction will be
+    committed (or rolled back, on error), and the time taken logged.
+    """
+    t_start = 0
+
+    def __init__(self, conn: sqlite3.Connection, label: str):
+        self.conn = conn
+        self.label = label
+
+    def __enter__(self):
+        self.conn.__enter__()
+        self.t_start = time.perf_counter()
+        return
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        t1 = time.perf_counter()
+        self.conn.__exit__(exc_type, exc_val, exc_tb)
+        t2 = time.perf_counter()
+        t_open = (t1 - self.t_start) * 1000
+        t_finish = (t2 - t1) * 1000
+        op = 'commit' if exc_val is None else 'rollback'
+        logging.debug("DB change (%s): %.1f ms in transaction, %.1f ms %s",
+                      self.label, t_open, t_finish, op)
+        return False
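+
+# A usage sketch (mirrors call sites elsewhere in this file):
+#
+#     with time_db_transaction(job_db, 'Insert jobs'):
+#         job_db.executemany(
+#             "INSERT INTO slurm_jobs VALUES (?, ?, 'PD', 0, 0)", jobs)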
 
 
 async def run_action(job_db, cmd, mode, proposal, run, exec_id) -> str:
@@ -332,7 +362,6 @@ async def run_action(job_db, cmd, mode, proposal, run, exec_id) -> str:
             message = Success.START_CORRECTION.format(proposal, run)
 
         # Save submitted jobs to persistent database.
-        c = job_db.cursor()  # FIXME: asyncio
         rstr = stdout.decode()
 
         for r in rstr.split("\n"):
@@ -342,11 +371,11 @@ async def run_action(job_db, cmd, mode, proposal, run, exec_id) -> str:
                 jobs = []
                 for jobid in jobids.split(','):
                     jobs.append((int(jobid.strip()), exec_id))
-                c.executemany(
-                    "INSERT INTO slurm_jobs VALUES (?, ?, 'PD', 0, 0)",
-                    jobs
-                )
-        job_db.commit()
+                with time_db_transaction(job_db, 'Insert jobs'):
+                    job_db.executemany(
+                        "INSERT INTO slurm_jobs VALUES (?, ?, 'PD', 0, 0)",
+                        jobs
+                    )
 
     else:  # mode == "sim"
         if "DARK" in cmd:
@@ -927,17 +956,23 @@ class ActionsServer:
         This will trigger a correction process to be launched for that run in
         the given cycle and proposal.
         """
-        request_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
+        request_time = datetime.now()
         try:
             runnr = runnr.strip('r')
 
             proposal = self._normalise_proposal_num(proposal)
             pconf_full = self.load_proposal_config(cycle, proposal)
 
-            with self.job_db:
+            if self.check_unfinished_correction(proposal, int(runnr)):
+                # A correction is already running for this run
+                msg = Success.ALREADY_REQUESTED.format(proposal, runnr)
+                logging.debug(msg)
+                return msg.encode()
+
+            with time_db_transaction(self.job_db, 'Insert request'):
                 cur = self.job_db.execute(
                     "INSERT INTO requests VALUES (NULL, ?, ?, ?, 'CORRECT', ?)",
-                    (rid, proposal, int(runnr), request_time)
+                    (rid, proposal, int(runnr), request_time.isoformat())
                 )
                 req_id = cur.lastrowid
 
@@ -994,8 +1029,15 @@ class ActionsServer:
                     dconfig = data_conf[karabo_id]
 
                     # check for files according to mapping in raw run dir.
+
+                    # data-mapping for LPD mini uses karabo-da names like
+                    # LPDMINI00/8 to identify individual modules. The /8 is not
+                    # part of the file name
+                    data_agg_names = {
+                        kda.split('/')[0] for kda in dconfig['karabo-da']
+                    }
                     if any(y in x for x in fl
-                           for y in dconfig['karabo-da']):
+                           for y in data_agg_names):
                         thisconf = copy.copy(dconfig)
                         if isinstance(pconf[karabo_id], dict):
                             thisconf.update(copy.copy(pconf[karabo_id]))
@@ -1054,10 +1096,16 @@ class ActionsServer:
         request_time = datetime.now()
 
         try:
-            with self.job_db:
+            if self.check_unfinished_correction(proposal, int(runnr)):
+                # A correction is already running for this run
+                msg = Success.ALREADY_REQUESTED.format(proposal, runnr)
+                logging.debug(msg)
+                return msg.encode()
+
+            with time_db_transaction(self.job_db, 'Insert request'):
                 cur = self.job_db.execute(
                     "INSERT INTO requests VALUES (NULL, ?, ?, ?, 'CORRECT', ?)",
-                    (rid, proposal, int(runnr), request_time.strftime('%Y-%m-%dT%H:%M:%S'))
+                    (rid, proposal, int(runnr), request_time.isoformat())
                 )
                 req_id = cur.lastrowid
 
@@ -1122,10 +1170,10 @@ class ActionsServer:
                     '--env-cache',
                     f'/gpfs/exfel/data/scratch/{getuser()}/calib-repeat-envs',
                     '--report-to',
-                    f'{reports_dir}/{karabo_id}_RECORRECT_{request_time:%y%m%d_%H%M%S}'
+                    f'{reports_dir}/{karabo_id}_RECORRECT_{request_time:%y%m%d_%H%M%S_%f}'
                 ]
 
-                with self.job_db:
+                with time_db_transaction(self.job_db, 'Insert execution'):
                     cur = self.job_db.execute(
                         "INSERT INTO executions VALUES (NULL, ?, ?, NULL, ?, NULL)",
                         (req_id, shlex.join(cmd), karabo_id)
@@ -1168,7 +1216,7 @@ class ActionsServer:
         :param runnr: is the run number in integer form, i.e. without leading
                      "r"
         """
-        request_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
+        request_time = datetime.now()
         try:
             pdus, karabo_das, wait_runs = ast.literal_eval(','.join(extra))
 
@@ -1185,10 +1233,10 @@ class ActionsServer:
                 karabo_id=karabo_id,
             )
 
-            with self.job_db:
+            with time_db_transaction(self.job_db, 'Insert request'):
                 cur = self.job_db.execute(
                     "INSERT INTO requests VALUES (NULL, ?, ?, ?, 'DARK', ?)",
-                    (rid, proposal, int(wait_runs[-1]), request_time)
+                    (rid, proposal, int(wait_runs[-1]), request_time.isoformat())
                 )
                 req_id = cur.lastrowid
 
@@ -1222,60 +1270,79 @@ class ActionsServer:
         async def _continue():
             """Runs in the background after we reply to the 'dark_request' request"""
             await update_mdc_status(self.mdc, 'dark_request', rid, queued_msg)
-            transfer_complete = await wait_transfers(
-                self.mdc, runs, proposal, cycle, instrument
-            )
-            if not transfer_complete:
-                # Timed out
-                await update_mdc_status(
-                    self.mdc, 'dark_request', rid, MDC.MIGRATION_TIMEOUT
+            try:
+                transfer_complete = await wait_transfers(
+                    self.mdc, runs, proposal, cycle, instrument
+                )
+                if not transfer_complete:
+                    # Timed out
+                    await update_mdc_status(
+                        self.mdc, 'dark_request', rid, MDC.MIGRATION_TIMEOUT
+                    )
+                    return
+
+                # Notebooks require one or three runs, depending on the
+                # detector type and operation mode.
+                triple = any(
+                    det in karabo_id for det in
+                    [
+                        "LPD",
+                        "AGIPD",
+                        "JUNGFRAU",
+                        "JF",
+                        "JNGFR",
+                        "JUNGF",
+                        "GH2",
+                        "G2",
+                    ])
+
+                # Beware: if the hardcoded strings above ever stop matching,
+                # triple silently becomes False even though the underlying
+                # notebook still expects run-high/run-med/run-low.
+                if triple and len(runs) == 1:
+                    runs_dict = {'run-high': runs[0],
+                                 'run-med': '0',
+                                 'run-low': '0'}
+                elif triple and len(runs) == 3:
+                    runs_dict = {'run-high': runs[0],
+                                 'run-med': runs[1],
+                                 'run-low': runs[2]}
+                else:  # single
+                    runs_dict = {'run': runs[0]}
+
+                # We assume that MyMDC does not allow a dark request unless
+                # the data is migrated, so some validation is skipped here.
+                thisconf = copy.copy(data_conf[karabo_id])
+
+                # Pop internal key to avoid propagation to xfel-calibrate.
+                thisconf.pop('disable-correct', None)
+
+                if (karabo_id in pconf
+                        and isinstance(pconf[karabo_id], dict)):
+                    thisconf.update(copy.copy(pconf[karabo_id]))
+
+                thisconf['in-folder'] = in_folder
+                thisconf['out-folder'] = out_folder
+                thisconf['karabo-id'] = karabo_id
+                thisconf['karabo-da'] = karabo_das
+                thisconf['operation-mode'] = operation_mode
+
+                thisconf.update(runs_dict)
+
+                detectors = {karabo_id: thisconf}
+
+                ret, report_path = await self.launch_jobs(
+                    runs, req_id, detectors, 'dark', instrument, cycle, proposal,
+                    request_time
+                )
+            except Exception as e:
+                msg = Errors.JOB_LAUNCH_FAILED.format('dark', e)
+                logging.error(msg, exc_info=e)
+                asyncio.ensure_future(
+                    update_mdc_status(self.mdc, 'dark_request', rid, msg)
                 )
                 return
 
-            # Notebooks require one or three runs, depending on the
-            # detector type and operation mode.
-            triple = any(det in karabo_id for det in
-                         ["LPD", "AGIPD", "JUNGFRAU", "JF", "JNGFR", "GH2"])
-
-            # This fails silently if the hardcoded strings above are
-            # ever changed (triple = False) but the underlying notebook
-            # still expects run-high/run-med/run-low.
-            if triple and len(runs) == 1:
-                runs_dict = {'run-high': runs[0],
-                             'run-med': '0',
-                             'run-low': '0'}
-            elif triple and len(runs) == 3:
-                runs_dict = {'run-high': runs[0],
-                             'run-med': runs[1],
-                             'run-low': runs[2]}
-            else:  # single
-                runs_dict = {'run': runs[0]}
-
-            # We assume that MyMDC does not allow dark request if the data
-            # is not migrated, thus skipping some validation here.
-            thisconf = copy.copy(data_conf[karabo_id])
-
-            # Pop internal key to avoid propagation to xfel-calibrate.
-            thisconf.pop('disable-correct', None)
-
-            if (karabo_id in pconf
-                    and isinstance(pconf[karabo_id], dict)):
-                thisconf.update(copy.copy(pconf[karabo_id]))
-
-            thisconf['in-folder'] = in_folder
-            thisconf['out-folder'] = out_folder
-            thisconf['karabo-id'] = karabo_id
-            thisconf['karabo-da'] = karabo_das
-            thisconf['operation-mode'] = operation_mode
-
-            thisconf.update(runs_dict)
-
-            detectors = {karabo_id: thisconf}
-
-            ret, report_path = await self.launch_jobs(
-                runs, req_id, detectors, 'dark', instrument, cycle, proposal,
-                request_time
-            )
             await update_mdc_status(self.mdc, 'dark_request', rid, ret)
             if len(report_path) == 0:
                 logging.warning("Failed to identify report path for dark_request")
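For clarity, the run-mapping branch above as a standalone sketch (the function name is hypothetical; the server inlines this logic):

    def make_runs_dict(runs, triple):
        # Hypothetical extraction of the branch above, for illustration.
        if triple and len(runs) == 1:
            # One dark run given for a three-run notebook: pad the rest.
            return {'run-high': runs[0], 'run-med': '0', 'run-low': '0'}
        elif triple and len(runs) == 3:
            return {'run-high': runs[0], 'run-med': runs[1], 'run-low': runs[2]}
        return {'run': runs[0]}  # single-run notebooks

    assert make_runs_dict(['100'], True) == {
        'run-high': '100', 'run-med': '0', 'run-low': '0'}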
@@ -1340,6 +1407,16 @@ class ActionsServer:
 
     # Helper methods for handlers ---------------------------------------------
 
+    def check_unfinished_correction(self, proposal: str, runnr: int) -> bool:
+        """Return True if a correction for this run is still in progress."""
+        row = self.job_db.execute(
+            "SELECT job_id FROM slurm_jobs "
+            "INNER JOIN executions USING (exec_id) "
+            "INNER JOIN requests USING (req_id) "
+            "WHERE proposal = ? AND run = ? AND action = 'CORRECT' "
+            "  AND slurm_jobs.finished = 0", (proposal, runnr)
+        ).fetchone()
+        return row is not None
+
     @staticmethod
     def _normalise_proposal_num(p: str) -> str:
         return "{:06d}".format(int(p.strip('p')))
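The three-way join in check_unfinished_correction assumes tables roughly like the sketch below; the real schema is created elsewhere, and the column lists here are only an approximation inferred from the INSERT statements in this file:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    # Approximate schema, inferred from the queries above; not the
    # authoritative definition.
    conn.executescript("""
        CREATE TABLE requests   (req_id INTEGER PRIMARY KEY, mymdc_id, proposal,
                                 run, action, timestamp);
        CREATE TABLE executions (exec_id INTEGER PRIMARY KEY, req_id, command,
                                 detector, karabo_id, success);
        CREATE TABLE slurm_jobs (job_id INTEGER PRIMARY KEY, exec_id, status,
                                 finished, elapsed);
    """)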
@@ -1387,14 +1464,14 @@ class ActionsServer:
                 action=action, instrument=instrument,
                 cycle=cycle, proposal=proposal,
                 runs="_".join([f"r{r}" for r in run_nrs]),
-                time_stamp=datetime.now().strftime('%y%m%d_%H%M%S'),
+                time_stamp=request_time.strftime('%y%m%d_%H%M%S_%f'),
                 det_instance=karabo_id,
-                request_time=request_time
+                request_time=request_time.isoformat(),
             ).split()
             cmd = parse_config(cmd, dconfig)
 
-            with self.job_db:
-                cur = self.job_db.execute(
+            with time_db_transaction(self.job_db, 'Insert execution'):
+                cur = self.job_db.execute(
                     "INSERT INTO executions VALUES (NULL, ?, ?, ?, ?, NULL)",
                     (req_id, shlex.join(cmd), detector, karabo_id)
                 )