From 747d3de225b5b8dc6c29645dbb0a3713ee69cfe0 Mon Sep 17 00:00:00 2001
From: Karim Ahmed <karim.ahmed@xfel.eu>
Date: Thu, 7 Nov 2019 15:20:25 +0100
Subject: [PATCH] rebase onto master and update only-offset method

---
 README.rst                                    |  68 +-
 cal_tools/cal_tools/agipdlib.py               |  45 +-
 cal_tools/cal_tools/tools.py                  |   8 +-
 .../AGIPD/AGIPD_Correct_and_Verify.ipynb      |  83 ++-
 notebooks/AGIPD/PlotFromCalDB_AGIPD_NBC.ipynb | 694 ------------------
 ...haracterize_Darks_NewDAQ_FastCCD_NBC.ipynb | 574 ---------------
 ...s_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb | 346 ++++++---
 .../FastCCD/PlotFromCalDB_FastCCD_NBC.ipynb   | 505 -------------
 .../Jungfrau/PlotFromCalDB_Jungfrau_NBC.ipynb | 574 ---------------
 notebooks/LPD/PlotFromCalDB_LPD_NBC.ipynb     | 673 -----------------
 .../ePix/PlotFromCalDB_ePix100_NBC.ipynb      | 481 ------------
 reportservice/manual_run.py                   |   2 +-
 requirements.txt                              |   3 +-
 webservice/messages.py                        |   2 +
 webservice/request_darks.py                   |  11 +-
 webservice/serve_overview.yaml                |   2 +-
 webservice/webservice.py                      |  37 +-
 xfel_calibrate/calibrate.py                   |  23 +-
 18 files changed, 438 insertions(+), 3693 deletions(-)
 delete mode 100644 notebooks/AGIPD/PlotFromCalDB_AGIPD_NBC.ipynb
 delete mode 100644 notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC.ipynb
 delete mode 100644 notebooks/FastCCD/PlotFromCalDB_FastCCD_NBC.ipynb
 delete mode 100644 notebooks/Jungfrau/PlotFromCalDB_Jungfrau_NBC.ipynb
 delete mode 100644 notebooks/LPD/PlotFromCalDB_LPD_NBC.ipynb
 delete mode 100644 notebooks/ePix/PlotFromCalDB_ePix100_NBC.ipynb

diff --git a/README.rst b/README.rst
index f781a0607..4fba76a33 100644
--- a/README.rst
+++ b/README.rst
@@ -1,7 +1,66 @@
+Offline Calibration
+===================
+
+The offline calibration is a package consisting of several services
+that apply most of the offline calibration and characterization
+for the detectors.
+
+Offline calibration installation
+================================
+
+It is recommended to install the offline calibration (pycalibration)
+package on the Maxwell cluster, using the anaconda/3 environment.
+
+Installation using Anaconda
+---------------------------
+
+First, load the anaconda/3 environment::
+
+    1. module load anaconda/3
+
+If you are installing into another Python environment, this step can be skipped.
+
+The offline calibration package can then be obtained from the git repository::
+
+    2. git clone https://git.xfel.eu/gitlab/detectors/pycalibration.git
+
+
+You can then install all requirements of this tool chain in your home directory by running::
+
+    3. pip install -r requirements.txt . --user
+
+in pycalibration's root directory.
+
+After installation, make sure that ``$HOME/.local/bin`` is in the PATH
+environment variable::
+
+    4. export PATH=$HOME/.local/bin:$PATH
+
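+You may also want to add this line to your ``~/.bashrc`` so that it persists
+across sessions.
+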
+
+Development Installation
+------------------------
+
+For a development installation in your home directory, which automatically
+picks up (most) changes, first install the dependencies as above,
+but then install the tool-chain separately in development mode::
+
+   pip install -e . --user
+
+
+Activate Offline calibration
+============================
+
+To use the pycalibration package, activate it with::
+
+    source activate
+
+from inside the pycalibration directory. This will automatically load
+all needed modules and put the home-directory installation on the ``$PATH``.
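+
+A typical session might then look like the following (the ``xfel-calibrate``
+entry point is shown for illustration)::
+
+    cd pycalibration
+    source activate
+    xfel-calibrate --help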
+
+
 Python Scripted Calibration
 ===========================
 
-First: do not run this on the Maxell gateway. Rather, `salloc`
+First: do not run this on the Maxwell gateway. Rather, `salloc`
 a node for yourself first::
 
    salloc -p exfel/upex -t 01:00:00
@@ -47,10 +106,3 @@ to provid accurate relative gain correction constants.
 
 You'll get a series of plots in the output directory as well.
 
-
-
-
-
-
-
-
diff --git a/cal_tools/cal_tools/agipdlib.py b/cal_tools/cal_tools/agipdlib.py
index 7d44cba5b..639697e0e 100644
--- a/cal_tools/cal_tools/agipdlib.py
+++ b/cal_tools/cal_tools/agipdlib.py
@@ -77,12 +77,11 @@ class AgipdCorrections:
                  bins_dig_gain_vs_signal, raw_fmt_version=2, chunk_size=512,
                  h5_data_path="INSTRUMENT/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/",
                  h5_index_path="INDEX/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/",
-                 do_rel_gain=True, chunk_size_idim=512, il_mode=False,
+                 chunk_size_idim=512, il_mode=False,
                  cal_det_instance="AGIPD1M1", karabo_data_mode=False,
                  force_hg_if_below=None, force_mg_if_below=None,
-                 mask_noisy_adc=False, adjust_mg_baseline=False,
-                 acquisition_rate=None, dont_zero_nans=False,
-                 dont_zero_orange=False):
+                 mask_noisy_adc=False, acquisition_rate=None,
+                 corr_bools=None):
         """
         Initialize an AgipdCorrections Class
 
@@ -104,7 +103,6 @@ class AgipdCorrections:
             image/data section
         :param h5_index_path: path in HDF5 file which is prefixed to the
             index section
-        :param do_rel_gain: do relative gain corrections
         :param chunk_size_idim: chunking size on image dimension when
             writing data out
         :param il_mode: set to true if AGIPD data is interlaced (pre-Nov 2017 data)
@@ -112,6 +110,8 @@ class AgipdCorrections:
         :param force_hg_if_below: set to a value different to None/0 to force a pixels
                                   gain to high, if the pixel value is below the given
                                   value after high gain offset subtraction.
+        :param corr_bools: A dict of booleans for the corrections that are
+                           selected or available.
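+                           A minimal example, with keys used elsewhere in
+                           this class (values illustrative)::
+
+                               corr_bools = {"only_offset": False,
+                                             "do_rel_gain": True,
+                                             "blc_noise": False,
+                                             "dont_zero_nans": False}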
         """
         self.agipd_base = h5_data_path.format(channel)
         self.idx_base = h5_index_path.format(channel)
@@ -135,15 +135,11 @@ class AgipdCorrections:
         self.bins_signal_high_range = bins_signal_high_range
         self.bins_dig_gain_vs_signal = bins_dig_gain_vs_signal
         self.cidx = 0
-        self.do_rel_gain = do_rel_gain
         self.sig_zero_mask = None
         self.base_offset = None
-        self.baseline_corr_using_noise = False
         self.baseline_corr_noise_threshold = 100
         self.baseline_corr_using_hmatch = True
-        self.correct_asic_diag = True
         self.melt_snow = SnowResolution.NONE
-        self.match_asics = True
         self.chunk_size_idim = chunk_size_idim
         self.dohigh = 0
         self.gsfun = self.split_gain_il if il_mode else self.split_gain
@@ -155,12 +151,10 @@ class AgipdCorrections:
         self.mask_noisy_adc = mask_noisy_adc
         self.adc_mask = None
         self.gain_stats = [0, 0, 0]
-        self.adjust_mg_baseline = adjust_mg_baseline
         self.mg_bl_adjust = 0
         self.acquisition_rate = acquisition_rate
-        self.dont_zero_nans = dont_zero_nans
-        self.dont_zero_orange = dont_zero_orange
         self.valid_indices = None
+        self.corr_bools = corr_bools
 
     def get_iteration_range(self):
         """Returns a range expression over which to iterate in chunks
@@ -275,7 +269,7 @@ class AgipdCorrections:
             # existing PC constants.
             # self.offset[..., 1] += doff
 
-        if self.adjust_mg_baseline and slopesPC is not None:
+        if self.corr_bools.get('adjust_mg_baseline') and slopesPC is not None:
             x = np.linspace(0, 1000, 1000)
             m_h = np.moveaxis(
                 np.moveaxis(slopesPC[..., :self.max_cells, 0], 0, 2), 0, 1)
@@ -956,14 +950,14 @@ class AgipdCorrections:
         # before doing relative gain correction we need to evaluate any
         # baseline shifts
         # as they are effectively an additional offset in the data
-        if self.baseline_corr_using_noise or self.baseline_corr_using_hmatch:
+        if self.corr_bools.get('blc_noise') or self.baseline_corr_using_hmatch:
 
             # do this image wise, as the shift is per image
             for i in range(im.shape[0]):
 
                 # first correction requested may be to evaluate shift via
                 # noise peak
-                if self.baseline_corr_using_noise:
+                if self.corr_bools.get('blc_noise'):
                     mn_noise = np.nanmean(self.noise[cellid[i], ..., 0])
                     dd = self.baseline_correct_via_noise(im[i, ...],
                                                          mn_noise,
@@ -982,7 +976,7 @@ class AgipdCorrections:
                     im[i, ...] = np.maximum(dd, dd2)
 
                     # finally correct diagonal effects if requested
-                    if self.correct_asic_diag:
+                    if self.corr_bools.get('corr_asic_diag'):
                         ii = im[i, ...]
                         gg = gain[i, ...]
                         adim = self.correct_baseline_via_hist_asic(ii, gg)
@@ -993,10 +987,10 @@ class AgipdCorrections:
                     im[i, ...] = dd
 
         # now we can correct for relative gain if requested
-        if self.do_rel_gain and hasattr(self, "rel_gain"):
+        if self.corr_bools.get("do_rel_gain") and hasattr(self, "rel_gain"):
             im *= rel_cor
 
-        if self.adjust_mg_baseline:
+        if self.corr_bools.get("adjust_mg_baseline"):
             mgbc = self.mg_bl_adjust[cellid, ...]
             im[gain == 1] += 1.5 * mgbc[gain == 1]
 
@@ -1011,18 +1005,18 @@ class AgipdCorrections:
 
         # finally, with all corrections performed we can match ASIC borders
         # if needed
-        if self.match_asics:
+        if self.corr_bools.get("match_asics"):
             im = self.match_asic_borders(im)
 
         # create a bad pixel mask for the data
         # we add any non-finite values to the mask
-        if not self.dont_zero_nans:
+        if not self.corr_bools.get("dont_zero_nans"):
             bidx = ~np.isfinite(im)
             im[bidx] = 0
             msk[bidx] |= BadPixels.VALUE_IS_NAN.value
 
         # and similarly those with unrealistically high or low pixel values
-        if not self.dont_zero_orange:
+        if self.corr_bools.get("dont_zero_orange"):
             bidx = (im < -1e7) | (im > 1e7)
             im[bidx] = 0
             msk[bidx] |= BadPixels.VALUE_OUT_OF_RANGE.value
@@ -1354,7 +1348,7 @@ class AgipdCorrections:
                 (self.low_edges, self.high_edges, self.signal_edges,
                  self.dig_signal_edges))
 
-    def initialize_from_db(self, dbparms, qm, apply_constants, only_dark=False):
+    def initialize_from_db(self, dbparms, qm, corr_bools, only_dark=False):
         """ Initialize calibration constants from the calibration database
 
         :param dbparms: a tuple containing relevant database parameters,
@@ -1375,8 +1369,9 @@ class AgipdCorrections:
 
         :param qm: quadrant and module of the constants to load in Q1M1
         notation
-        :param apply_constants: a dict with a bools for applying and
-        retrieving certain constants.
+        :param corr_bools: a dict with bools for applying corrections, which
+                           can also be used here to avoid retrieving unneeded
+                           calibration parameters (e.g. SlopesPC or SlopesFF).
         :param only_dark: load only dark image derived constants. This
             implies that a `calfile` is used to load the remaining
             constants. Useful to reduce DB traffic and interactions
@@ -1489,7 +1484,7 @@ class AgipdCorrections:
 
         dinstance = getattr(Detectors, self.cal_det_instance)
         when = {}
-        if apply_constants['offset']:
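+        # If only the offset correction is requested, load just the
+        # dark-image-derived constants from the database.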
+        if self.corr_bools.get('only_offset'):
             only_dark = True
 
         offset, when['offset'] = \
diff --git a/cal_tools/cal_tools/tools.py b/cal_tools/cal_tools/tools.py
index 556095361..eb791019f 100644
--- a/cal_tools/cal_tools/tools.py
+++ b/cal_tools/cal_tools/tools.py
@@ -173,9 +173,10 @@ def make_timing_summary(run_path, joblist):
 
     with open("{}/timing_summary.rst".format(run_path), "w+") as gfile:
 
-        table = tabulate.tabulate(pars_vals, tablefmt='latex',
-                                  headers=pars_name)
-        gfile.write(dedent(tmpl.render(table=table.split('\n'))))
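+        # Skip the timing table entirely if no job timing data was collected.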
+        if len(pars_vals)>0:
+            table = tabulate.tabulate(pars_vals, tablefmt='latex',
+                                      headers=pars_name)
+            gfile.write(dedent(tmpl.render(table=table.split('\n'))))
 
 
 def make_report(run_path, tmp_path, out_path, project, author, version,
@@ -226,6 +227,7 @@ def make_report(run_path, tmp_path, out_path, project, author, version,
 
     with open("{}/conf.py.tmp".format(run_path), "w") as mf:
         latex_elements = {'extraclassoptions': ',openany, oneside',
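+                          # longtable lets long tables break across pages in
+                          # the generated LaTeX report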
+                          'preamble': r'\usepackage{longtable}',
                           'maketitle': r'\input{titlepage.tex.txt}'}
         mf.write("latex_elements = {}\n".format(latex_elements))
         mf.write("latex_logo = '{}/{}'\n".format(module_path,
diff --git a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
index a8e51df71..4c2208aaa 100644
--- a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
+++ b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
@@ -22,16 +22,15 @@
    },
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/MID/201931/p900090/raw\" # the folder to read data from, required\n",
-    "run = 563 # runs to process, required\n",
+    "in_folder = \"/gpfs/exfel/exp/MID/201931/p900107/raw\" # the folder to read data from, required\n",
+    "run = 10 # runs to process, required\n",
     "out_folder =  \"/gpfs/exfel/data/scratch/ahmedk/test/AGIPD_Corr\"  # the folder to output to, required\n",
     "calfile =  \"/gpfs/exfel/data/scratch/haufs/agipd_on_demand/agipd_store_mid.h5\" # path to calibration file. Leave empty if all data should come from DB\n",
     "sequences =  [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
     "interlaced = False # whether data is in interlaced layout\n",
     "overwrite = True # set to True if existing data should be overwritten\n",
-    "no_relative_gain = False # do not do relative gain correction\n",
-    "relative_gain = False # do not do relative gain correction\n",
+    "relative_gain = False # do relative gain correction\n",
     "cluster_profile = \"noDB\"\n",
     "max_pulses = [0, 500, 1] # range list [st, end, step] of maximum pulse indices. 3 allowed maximum list input elements.   \n",
     "local_input = False\n",
@@ -87,6 +86,43 @@
     "    "
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Fill dictionaries comprising bools and arguments for correction and data analysis\n",
+    "\n",
+    "# Here the herarichy and dependability for correction booleans are defined \n",
+    "corr_bools = {}\n",
+    "\n",
+    "# offset is at the bottom of AGIPD correction pyramid.\n",
+    "corr_bools[\"only_offset\"] = only_offset\n",
+    "\n",
+    "# Dont apply any corrections if only_offset is requested \n",
+    "if not only_offset:\n",
+    "    \n",
+    "    # Dont apply PC correction of only FF is requested\n",
+    "    if not only_slopesff:\n",
+    "        corr_bools[\"SlopesPC\"] = only_slopespc\n",
+    "    \n",
+    "    # Dont apply FF correction of only PC is requested\n",
+    "    if not only_slopespc:\n",
+    "        corr_bools[\"SlopesFF\"] = only_slopesff\n",
+    "        \n",
+    "    corr_bools[\"adjust_mg_baseline\"] = adjust_mg_baseline\n",
+    "    corr_bools[\"do_rel_gain\"] = relative_gain\n",
+    "    corr_bools[\"blc_noise\"] = blc_noise\n",
+    "    corr_bools[\"match_asics\"] = match_asics\n",
+    "    corr_bools[\"corr_asic_diag\"] = corr_asic_diag\n",
+    "    corr_bools[\"dont_zero_nans\"] = dont_zero_nans\n",
+    "    corr_bools[\"dont_zero_orange\"] = dont_zero_orange\n",
+    "\n",
+    "# Here the herarichy and dependability for data analysis booleans and arguments are defined \n",
+    "data_analysis_parms = {}"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -140,9 +176,6 @@
     "if sequences[0] == -1:\n",
     "    sequences = None\n",
     "\n",
-    "do_rel_gain = relative_gain  \n",
-    "do_rel_gain = not no_relative_gain\n",
-    "\n",
     "QUADRANTS = 4\n",
     "MODULES_PER_QUAD = 4\n",
     "DET_FILE_INSET = \"AGIPD\"\n",
@@ -165,7 +198,7 @@
     "from cal_tools.agipdlib import SnowResolution\n",
     "melt_snow = False if melt_snow == \"\" else SnowResolution(melt_snow)\n",
     "\n",
-    "special_opts = blc_noise, blc_noise_threshold, blc_hist, match_asics, corr_asic_diag, melt_snow\n",
+    "special_opts = blc_noise_threshold, blc_hist, melt_snow\n",
     "\n",
     "loc = None\n",
     "if instrument == \"SPB\":\n",
@@ -348,12 +381,11 @@
    "source": [
     "import copy\n",
     "from functools import partial\n",
-    "def correct_module(max_cells, do_rel_gain, index_v, CHUNK_SIZE, total_sequences, sequences_qm, \n",
+    "def correct_module(max_cells, index_v, CHUNK_SIZE, total_sequences, sequences_qm, \n",
     "                   bins_gain_vs_signal, bins_signal_low_range, bins_signal_high_range,\n",
     "                   bins_dig_gain_vs_signal, max_pulses, dbparms, fileparms, nodb, chunk_size_idim,\n",
     "                   special_opts, il_mode, loc, dinstance, force_hg_if_below, force_mg_if_below,\n",
-    "                   mask_noisy_adc, adjust_mg_baseline, acq_rate, dont_zero_nans, dont_zero_orange,\n",
-    "                   apply_constants, inp):\n",
+    "                   mask_noisy_adc, acq_rate, corr_bools, inp):\n",
     "    print(\"foo\")\n",
     "    import numpy as np\n",
     "    import copy\n",
@@ -444,30 +476,26 @@
     "            agipd_corr = AgipdCorrections(infile, outfile, max_cells, channel, max_pulses,\n",
     "                                          bins_gain_vs_signal, bins_signal_low_range,\n",
     "                                          bins_signal_high_range, bins_dig_gain_vs_signal,\n",
-    "                                          do_rel_gain=do_rel_gain, chunk_size_idim=chunk_size_idim,\n",
+    "                                          chunk_size_idim=chunk_size_idim,\n",
     "                                          il_mode=il_mode, raw_fmt_version=index_v, \n",
     "                                          h5_data_path=\"INSTRUMENT/{}/DET/{{}}CH0:xtdf/\".format(loc),\n",
     "                                          h5_index_path=\"INDEX/{}/DET/{{}}CH0:xtdf/\".format(loc),\n",
     "                                          cal_det_instance=dinstance, force_hg_if_below=force_hg_if_below,\n",
     "                                          force_mg_if_below=force_mg_if_below, mask_noisy_adc=mask_noisy_adc,\n",
-    "                                          adjust_mg_baseline=adjust_mg_baseline, acquisition_rate=acq_rate,\n",
-    "                                          dont_zero_nans=dont_zero_nans, dont_zero_orange=dont_zero_orange)\n",
+    "                                          acquisition_rate=acq_rate, corr_bools=corr_bools)\n",
     "\n",
-    "            blc_noise, blc_noise_threshold, blc_hist, match_asics, corr_asic_diag, melt_snow = special_opts\n",
-    "            if not apply_constants[\"offset\"]:\n",
-    "                agipd_corr.baseline_corr_using_noise = blc_noise\n",
+    "            blc_noise_threshold, blc_hist, melt_snow = special_opts\n",
+    "            if not corr_bools[\"only_offset\"]:\n",
     "                agipd_corr.baseline_corr_noise_threshold = blc_noise_threshold\n",
-    "            agipd_corr.baseline_corr_using_hmatch = blc_hist\n",
-    "            agipd_corr.match_asics = match_asics\n",
-    "            agipd_corr.correct_asic_diag = corr_asic_diag\n",
-    "            agipd_corr.melt_snow = melt_snow\n",
+    "                agipd_corr.baseline_corr_using_hmatch = blc_hist\n",
+    "                agipd_corr.melt_snow = melt_snow\n",
     "            try:\n",
     "                agipd_corr.get_valid_image_idx()\n",
     "            except IOError:\n",
     "                return\n",
     "            if not nodb:\n",
-    "                when = agipd_corr.initialize_from_db(dbparms, qm, apply_constants, only_dark=(fileparms != \"\"))\n",
-    "            if fileparms != \"\" and not apply_constants[\"offset\"]:\n",
+    "                when = agipd_corr.initialize_from_db(dbparms, qm, corr_bools, only_dark=(fileparms != \"\"))\n",
+    "            if fileparms != \"\" and not corr_bools[\"only_offset\"]:\n",
     "                agipd_corr.initialize_from_file(fileparms, qm, with_dark=nodb)\n",
     "            print(\"Initialized constants\")\n",
     "\n",
@@ -528,11 +556,6 @@
     "\n",
     "fileparms = calfile\n",
     "\n",
-    "apply_constants = {}\n",
-    "apply_constants['offset'] = only_offset\n",
-    "apply_constants['SlopesPC'] = only_slopespc\n",
-    "apply_constants['SlopesFF'] = only_slopesff\n",
-    "\n",
     "all_cells = []\n",
     "whens = []\n",
     "errors = []\n",
@@ -556,11 +579,11 @@
     "    first = False\n",
     "    if len(inp) >= min(MAX_PAR, left):\n",
     "        print(\"Running {} tasks parallel\".format(len(inp)))\n",
-    "        p = partial(correct_module, max_cells, do_rel_gain, index_v, CHUNK_SIZE, total_sequences,\n",
+    "        p = partial(correct_module, max_cells, index_v, CHUNK_SIZE, total_sequences,\n",
     "                    sequences_qm, bins_gain_vs_signal, bins_signal_low_range, bins_signal_high_range,\n",
     "                    bins_dig_gain_vs_signal, max_pulses, dbparms, fileparms, nodb, chunk_size_idim,\n",
     "                    special_opts, il_mode, loc, dinstance, force_hg_if_below, force_mg_if_below,\n",
-    "                    mask_noisy_adc, adjust_mg_baseline, acq_rate, dont_zero_nans, dont_zero_orange, apply_constants)\n",
+    "                    mask_noisy_adc, acq_rate, corr_bools)\n",
     "\n",
     "        r = view.map_sync(p, inp)\n",
     "\n",
diff --git a/notebooks/AGIPD/PlotFromCalDB_AGIPD_NBC.ipynb b/notebooks/AGIPD/PlotFromCalDB_AGIPD_NBC.ipynb
deleted file mode 100644
index 842f9db5b..000000000
--- a/notebooks/AGIPD/PlotFromCalDB_AGIPD_NBC.ipynb
+++ /dev/null
@@ -1,694 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Statistical analysis of calibration factors#\n",
-    "\n",
-    "Author: Mikhail Karnevskiy, Steffen Hauf, Version 0.2\n",
-    "\n",
-    "Calibration constants for AGIPD1M detector from the data base with injection time between start_date and end_date are considered.\n",
-    "\n",
-    "To be visualized, calibration constants are averaged per ASICs. Plots shows calibration constant over time for each constant and for each module. Summary plots overall modules are created.\n",
-    "\n",
-    "In additional gain-slopes flat-field and pulse-capacitor are combined to relative-gain constant and presented as well. Noise in electron units is derived using gain factors and presented.\n",
-    "\n",
-    "Values shown in plots are saved in h5 files.\n",
-    "\n",
-    "All presented values corresponds to high and medium gain stages."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "cluster_profile = \"noDB\"  # The ipcluster profile to use\n",
-    "start_date = \"2019-01-01\"  # Date to start investigation interval from\n",
-    "end_date = \"NOW\"  # Date to end investigation interval at, can be \"now\"\n",
-    "nconstants = 20 # Number of time stamps to plot. If not 0, overcome start_date.\n",
-    "constants = [\"Noise\", \"Offset\", \"SlopesFF\", \"SlopesPC\"]  # Constants to plot\n",
-    "modules = [1]  # Modules, set to -1 for all, range allowed\n",
-    "bias_voltages = [300]  # Bias voltage\n",
-    "mem_cells = [250]  # Number of used memory cells. Typically: 4,32,64,128,176.\n",
-    "acquisition_rate = [0.0, 1.1, 2.2, 4.5]\n",
-    "photon_energy = 9.2  # Photon energy of the beam\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/karnem/test_AGIPD55/\"  # Output folder, required\n",
-    "use_existing = \"\" # If not empty, constants stored in given folder will be used\n",
-    "cal_db_timeout = 120000 # timeout on caldb requests\",\n",
-    "adu_to_photon = 33.17 # ADU to photon conversion factor (8000 / 3.6 / 67.)\n",
-    "nMemToShow = 32 # Number of memory cells to be shown in plots over ASICs\n",
-    "db_module = \"AGIPD1M1\"  # detector entry in the DB to investigate\n",
-    "dclass = \"AGIPD\"  # Detector class\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # the database interface to use\n",
-    "max_time = 15 # the max margin in min. for the matching closest bad pixels\n",
-    "range_offset = [4000., 5500, 6500, 8500] # plotting range for offset: high gain l, r, medium gain l, r \n",
-    "range_noise = [2.5, 15, 7.5, 17.0] # plotting range for noise: high gain l, r, medium gain l, r \n",
-    "range_gain = [0.8, 1.2, 0.8, 1.2] # plotting range for gain: high gain l, r, medium gain l, r \n",
-    "range_noise_e = [85., 500., 85., 500.] # plotting range for noise in [e-]: high gain l, r, medium gain l, r \n",
-    "range_slopesPC = [22.0, 27.0, -0.5, 1.5] # plotting range for slope PC: high gain l, r, medium gain l, r \n",
-    "range_slopesFF = [0.8, 1.2, 0.6, 1.2] # plotting range for slope FF: high gain l, r, medium gain l, r \n",
-    "plot_range = 3 # range for plotting in units of median absolute deviations\n",
-    "x_labels = ['Acquisition rate', 'Memory cells'] # parameters to be shown on X axis"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "import copy\n",
-    "import datetime\n",
-    "import dateutil.parser\n",
-    "import numpy as np\n",
-    "import os\n",
-    "import sys\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "from iCalibrationDB import Constants, Conditions, Detectors, ConstantMetaData\n",
-    "from cal_tools.tools import get_from_db, get_random_db_interface\n",
-    "from cal_tools.ana_tools import (save_dict_to_hdf5, load_data_from_hdf5, \n",
-    "                                 combine_constants, HMType, IMType,\n",
-    "                                 hm_combine, combine_lists, get_range)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Prepare variables\n",
-    "nMem = max(mem_cells) # Number of mem Cells to store\n",
-    "spShape = (64,64) # Shape of superpixel\n",
-    "\n",
-    "if modules[0] == -1:\n",
-    "    modules = range(16)\n",
-    "    \n",
-    "modules = [\"Q{}M{}\".format(x // 4 + 1, x % 4 + 1) for x in modules]\n",
-    "\n",
-    "acquisition_rate[acquisition_rate==0] = None\n",
-    "\n",
-    "constantsDark = {\"SlopesFF\": 'BadPixelsFF',\n",
-    "                 'SlopesPC': 'BadPixelsPC',\n",
-    "                 'Noise': 'BadPixelsDark',\n",
-    "                 'Offset': 'BadPixelsDark'}\n",
-    "print('Bad pixels data: ', constantsDark)\n",
-    "\n",
-    "# Define parameters in order to perform loop over time stamps\n",
-    "start = datetime.datetime.now() if start_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    start_date)\n",
-    "end = datetime.datetime.now() if end_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    end_date)\n",
-    "\n",
-    "# Create output folder\n",
-    "os.makedirs(out_folder, exist_ok=True)\n",
-    "\n",
-    "# Get getector conditions\n",
-    "det = getattr(Detectors, db_module)\n",
-    "dconstants = getattr(Constants, dclass)\n",
-    "\n",
-    "print('CalDB Interface: {}'.format(cal_db_interface))\n",
-    "print('Start time at: ', start)\n",
-    "print('End time at: ', end)\n",
-    "print('Modules: ', modules)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "parameter_list = combine_lists(bias_voltages, modules, mem_cells, acquisition_rate,\n",
-    "                               names = ['bias_voltage', 'module', 'mem_cells', 'acquisition_rate'])\n",
-    "print(parameter_list)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "# Retrieve list of meta-data\n",
-    "constant_versions = []\n",
-    "constant_parameters = []\n",
-    "constantBP_versions = []\n",
-    "\n",
-    "# Loop over constants\n",
-    "for c, const in enumerate(constants):\n",
-    "    \n",
-    "    if use_existing != \"\":\n",
-    "        break\n",
-    "    \n",
-    "    # Loop over parameters\n",
-    "    for pars in parameter_list:\n",
-    "    \n",
-    "        if (const in [\"Offset\", \"Noise\", \"SlopesPC\"] or \"DARK\" in const.upper()):\n",
-    "            dcond = Conditions.Dark\n",
-    "            mcond = getattr(dcond, dclass)(\n",
-    "                        memory_cells=pars['mem_cells'],\n",
-    "                        bias_voltage=pars['bias_voltage'],\n",
-    "                        acquisition_rate=pars['acquisition_rate'])\n",
-    "        else:\n",
-    "            dcond = Conditions.Illuminated\n",
-    "            mcond = getattr(dcond, dclass)(\n",
-    "                        memory_cells=pars['mem_cells'],\n",
-    "                        bias_voltage=pars['bias_voltage'],\n",
-    "                        acquisition_rate=pars['acquisition_rate'],\n",
-    "                        photon_energy=photon_energy)\n",
-    "\n",
-    "        print('Request: ', const, 'with paramters:', pars)\n",
-    "        # Request Constant versions for given parameters and module\n",
-    "        data = get_from_db(getattr(det, pars['module']),\n",
-    "                           getattr(dconstants,\n",
-    "                                   const)(),\n",
-    "                           copy.deepcopy(mcond), None,\n",
-    "                           cal_db_interface,\n",
-    "                           creation_time=start,\n",
-    "                           verbosity=2,\n",
-    "                           timeout=cal_db_timeout,\n",
-    "                           meta_only=True,\n",
-    "                           version_info=True)\n",
-    "\n",
-    "        if not isinstance(data, list):\n",
-    "            continue\n",
-    "        \n",
-    "        # Request BP constant versions\n",
-    "        print('constantDark:', constantsDark[const], )        \n",
-    "        dataBP = get_from_db(getattr(det, pars['module']),\n",
-    "                             getattr(dconstants, \n",
-    "                                     constantsDark[const])(),\n",
-    "                             copy.deepcopy(mcond), None,\n",
-    "                             cal_db_interface,\n",
-    "                             creation_time=start,\n",
-    "                             verbosity=2,\n",
-    "                             timeout=cal_db_timeout,\n",
-    "                             meta_only=True,\n",
-    "                             version_info=True)\n",
-    "        \n",
-    "        for d in data:\n",
-    "            # print('Item: ', d)\n",
-    "            # Match proper BP constant version\n",
-    "            # and get constant version within\n",
-    "            # requested time range\n",
-    "            if d is None:\n",
-    "                print('Time or data is not found!')\n",
-    "                continue\n",
-    "\n",
-    "            dt = dateutil.parser.parse(d['begin_at'])\n",
-    "\n",
-    "            if (dt.replace(tzinfo=None) > end or \n",
-    "                (nconstants==0 and dt.replace(tzinfo=None) < start)):\n",
-    "                continue\n",
-    "                \n",
-    "            closest_BP = None\n",
-    "            closest_BPtime = None\n",
-    "            found_BPmatch = False\n",
-    "                \n",
-    "            if not isinstance(dataBP, list):\n",
-    "                dataBP = []\n",
-    "            \n",
-    "            for dBP in dataBP:\n",
-    "                if dBP is None:\n",
-    "                    print(\"Bad pixels are not found!\")\n",
-    "                    continue\n",
-    "            \n",
-    "                dt = dateutil.parser.parse(d['begin_at'])\n",
-    "                dBPt = dateutil.parser.parse(dBP['begin_at'])\n",
-    "                \n",
-    "                if dt == dBPt:\n",
-    "                    found_BPmatch = True\n",
-    "                else:\n",
-    "\n",
-    "                    if np.abs(dBPt-dt).seconds < (max_time*60):\n",
-    "                        if closest_BP is None:\n",
-    "                            closest_BP = dBP\n",
-    "                            closest_BPtime = dBPt\n",
-    "                        else:\n",
-    "                            if np.abs(dBPt-dt) < np.abs(closest_BPtime-dt):\n",
-    "                                closest_BP = dBP\n",
-    "                                closest_BPtime = dBPt\n",
-    "                    \n",
-    "                    if dataBP.index(dBP) ==  len(dataBP)-1:\n",
-    "                        if closest_BP:\n",
-    "                            dBP = closest_BP\n",
-    "                            dBPt = closest_BPtime\n",
-    "                            found_BPmatch = True\n",
-    "                        else:\n",
-    "                            print('Bad pixels are not found!')\n",
-    "                    \n",
-    "                if found_BPmatch:\n",
-    "                    print(\"Found constant {}: begin at {}\".format(const, dt))\n",
-    "                    print(\"Found bad pixels at {}\".format(dBPt))\n",
-    "                    constantBP_versions.append(dBP)\n",
-    "                    constant_versions.append(d)\n",
-    "                    constant_parameters.append(copy.deepcopy(pars))\n",
-    "                    break\n",
-    "                    \n",
-    "            if not found_BPmatch:\n",
-    "                print('Bad pixels are not matched')\n",
-    "                constantBP_versions.append(None)\n",
-    "                constant_versions.append(d)\n",
-    "                constant_parameters.append(copy.deepcopy(pars))\n",
-    "                    \n",
-    "print('Number of retrieved constants {}'.format(len(constant_versions)))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def prepare_to_store(a, nMem):\n",
-    "    shape = list(a.shape[:2])+[nMem, 2]\n",
-    "    b = np.full(shape, np.nan)\n",
-    "    b[:, :, :a.shape[2]] = a[:, :, :, :2]\n",
-    "    return b\n",
-    "\n",
-    "def get_rebined(a, rebin):\n",
-    "    return a.reshape(\n",
-    "                int(a.shape[0] / rebin[0]),\n",
-    "                rebin[0],\n",
-    "                int(a.shape[1] / rebin[1]),\n",
-    "                rebin[1],\n",
-    "                a.shape[2],\n",
-    "                a.shape[3])\n",
-    "    \n",
-    "def modify_const(const, data, isBP = False):\n",
-    "    if const in ['SlopesFF']:\n",
-    "        if (len(data.shape) == 4):\n",
-    "            data = data[:, :, :, 0][..., None]\n",
-    "        else:\n",
-    "            data = data[..., None]\n",
-    "            \n",
-    "        if data.shape[2]<3:\n",
-    "            data = data[:,:,0,None]\n",
-    "\n",
-    "    if not isBP:\n",
-    "        if data.shape[0] != 128:\n",
-    "            data = data.swapaxes(0, 2).swapaxes(1, 3).swapaxes(2, 3)\n",
-    "\n",
-    "        # Copy slope medium to be saved later\n",
-    "        if const in ['SlopesPC']:\n",
-    "            data[:, :, :, 1] = data[:, :, :, 3]\n",
-    "    else:\n",
-    "        if const in ['SlopesPC']:\n",
-    "            if len(data.shape) == 3:\n",
-    "                data = data[:, :, :, None].repeat(10, axis=3)\n",
-    "\n",
-    "        if data.shape[0] != 128:\n",
-    "            data = data.swapaxes(0, 1).swapaxes(1, 2)\n",
-    "        \n",
-    "    if len(data.shape) < 4:\n",
-    "        print(data.shape, \"Unexpected shape!\")\n",
-    "    return data\n",
-    "\n",
-    "\n",
-    "\n",
-    "ret_constants = {}\n",
-    "constant_data = ConstantMetaData()\n",
-    "constant_BP = ConstantMetaData()\n",
-    "# sort over begin_at\n",
-    "idxs, _ = zip(*sorted(enumerate(constant_versions), \n",
-    "                     key=lambda x: x[1]['begin_at'], reverse=True))\n",
-    "\n",
-    "for i in idxs:\n",
-    "    const = constant_versions[i]['data_set_name'].split('/')[-2]\n",
-    "    qm = constant_parameters[i]['module']\n",
-    "    \n",
-    "    if not const in ret_constants:\n",
-    "        ret_constants[const] = {}\n",
-    "    if not qm in ret_constants[const]:\n",
-    "            ret_constants[const][qm] = []\n",
-    "            \n",
-    "    if nconstants>0 and len(ret_constants[const][qm])>=nconstants:\n",
-    "        continue\n",
-    "            \n",
-    "\n",
-    "    constant_data.retrieve_from_version_info(constant_versions[i])\n",
-    "    cdata = constant_data.calibration_constant.data\n",
-    "    ctime = constant_data.calibration_constant_version.begin_at \n",
-    "    cdata = modify_const(const, cdata)\n",
-    "    print(\"constant: {}, module {}, begin_at {}\".format(const, qm, ctime))\n",
-    "\n",
-    "    if constantBP_versions[i]:\n",
-    "        constant_BP.retrieve_from_version_info(constantBP_versions[i])\n",
-    "        cdataBP = constant_BP.calibration_constant.data\n",
-    "        cdataBP = modify_const(const, cdataBP, True)\n",
-    "\n",
-    "        if cdataBP.shape != cdata.shape:\n",
-    "            print('Wrong bad pixel shape! {}, expected {}'.format(cdataBP.shape, cdata.shape))\n",
-    "            cdataBP = np.full_like(cdata, -1)\n",
-    "\n",
-    "        # Apply bad pixel mask\n",
-    "        cdataABP = np.copy(cdata)\n",
-    "        cdataABP[cdataBP > 0] = np.nan\n",
-    "\n",
-    "        # Create superpixels for constants with BP applied\n",
-    "        cdataABP = get_rebined(cdataABP, spShape)\n",
-    "        toStoreBP = prepare_to_store(np.nanmean(cdataABP, axis=(1, 3)), nMem)\n",
-    "        toStoreBPStd = prepare_to_store(np.nanstd(cdataABP, axis=(1, 3)), nMem)\n",
-    "\n",
-    "        # Prepare number of bad pixels per superpixels\n",
-    "        cdataBP = get_rebined(cdataBP, spShape)\n",
-    "        cdataNBP = prepare_to_store(np.nansum(cdataBP > 0, axis=(1, 3)), nMem)\n",
-    "\n",
-    "    # Create superpixels for constants without BP applied\n",
-    "    cdata = get_rebined(cdata, spShape)\n",
-    "    toStoreStd = prepare_to_store(np.nanstd(cdata, axis=(1, 3)), nMem)\n",
-    "    toStore = prepare_to_store(np.nanmean(cdata, axis=(1, 3)), nMem)\n",
-    "    \n",
-    "    if not constantBP_versions[i]:\n",
-    "        toStoreBP = np.full_like(toStore, IMType.NO_BPMAP.value)\n",
-    "        toStoreBPStd = np.full_like(toStore, IMType.NO_BPMAP.value)\n",
-    "        cdataNBP = np.full_like(toStore, IMType.NO_BPMAP.value)\n",
-    "    \n",
-    "    dpar = {p.name: p.value for p in constant_data.detector_condition.parameters}\n",
-    "\n",
-    "    print(\"Store values in dict\", const, qm, ctime)\n",
-    "    ret_constants[const][qm].append({'ctime': ctime,\n",
-    "                                     'nBP': cdataNBP,\n",
-    "                                     'dataBP': toStoreBP,\n",
-    "                                     'dataBPStd': toStoreBPStd,\n",
-    "                                     'data': toStore,\n",
-    "                                     'dataStd': toStoreStd,\n",
-    "                                     'mdata': dpar})    \n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    print('Save data to /CalDBAna_{}_{}.h5'.format(dclass, modules[0]))\n",
-    "    save_dict_to_hdf5(ret_constants,\n",
-    "                      '{}/CalDBAna_{}_{}.h5'.format(out_folder, dclass, modules[0]))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(out_folder, dclass)\n",
-    "else:\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(use_existing, dclass)\n",
-    "\n",
-    "print('Load data from {}'.format(fpath))\n",
-    "ret_constants = load_data_from_hdf5(fpath)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Combine FF and PC data to calculate Gain\n",
-    "# Estimate Noise in units of electrons\n",
-    "print ('Calculate Gain and Noise in electron units')\n",
-    "\n",
-    "ret_constants[\"Gain\"] = {}\n",
-    "ret_constants[\"Noise-e\"] = {}\n",
-    "for mod in list(range(16)):\n",
-    "    if (\"SlopesFF\" not in ret_constants or\n",
-    "            \"SlopesPC\" not in ret_constants):\n",
-    "        break\n",
-    "\n",
-    "    qm = \"Q{}M{}\".format(mod // 4 + 1, mod % 4 + 1)\n",
-    "    print(qm)\n",
-    "\n",
-    "    if (qm not in ret_constants[\"SlopesFF\"] or\n",
-    "            qm not in ret_constants[\"SlopesPC\"]):\n",
-    "        continue\n",
-    "\n",
-    "    ret_constants[\"Gain\"][qm] = {}\n",
-    "\n",
-    "    dataFF = ret_constants[\"SlopesFF\"][qm]\n",
-    "    dataPC = ret_constants[\"SlopesPC\"][qm]\n",
-    "\n",
-    "    if (len(dataFF) == 0 or len(dataPC) == 0):\n",
-    "        continue\n",
-    "\n",
-    "    ctimesFF = np.array(dataFF[\"ctime\"])\n",
-    "    ctimesPC = np.array(dataPC[\"ctime\"])\n",
-    "\n",
-    "    ctime, icomb = combine_constants(ctimesFF, ctimesPC)\n",
-    "\n",
-    "    cdataPC_vs_time = np.array(dataPC[\"data\"])[..., 0]\n",
-    "    cdataFF_vs_time = np.array(dataFF[\"data\"])[..., 0]\n",
-    "\n",
-    "    cdataFF_vs_time = np.nanmedian(cdataFF_vs_time, axis=3)[..., None]\n",
-    "\n",
-    "    cdataFF_vs_time /= np.nanmedian(cdataFF_vs_time, axis=(1, 2, 3))[:, None,\n",
-    "                       None, None]\n",
-    "    cdataPC_vs_time /= np.nanmedian(cdataPC_vs_time, axis=(1, 2, 3))[:, None,\n",
-    "                       None, None]\n",
-    "\n",
-    "    gain_vs_time = []\n",
-    "    for iFF, iPC in icomb:\n",
-    "        gain_vs_time.append(cdataFF_vs_time[iFF] * cdataPC_vs_time[iPC])\n",
-    "\n",
-    "    print(np.array(gain_vs_time).shape)\n",
-    "    \n",
-    "    ctime_ts = [t.timestamp() for t in ctime]\n",
-    "    \n",
-    "    ret_constants[\"Gain\"][qm][\"ctime\"] = ctime\n",
-    "    ret_constants[\"Gain\"][qm][\"data\"] = np.array(gain_vs_time)\n",
-    "    # Fill missing data for compatibility with plotting code\n",
-    "    ret_constants[\"Gain\"][qm][\"dataBP\"] = np.array(gain_vs_time)\n",
-    "    ret_constants[\"Gain\"][qm][\"nBP\"] = np.array(gain_vs_time)\n",
-    "\n",
-    "    if \"Noise\" not in ret_constants:\n",
-    "        continue\n",
-    "\n",
-    "    if qm not in ret_constants[\"Noise\"]:\n",
-    "        continue\n",
-    "\n",
-    "    dataN = ret_constants[\"Noise\"][qm]\n",
-    "    if len(dataN) == 0:\n",
-    "        continue\n",
-    "\n",
-    "    ret_constants[\"Noise-e\"][qm] = {}\n",
-    "            \n",
-    "    ctimesG = np.array(ctime)\n",
-    "    ctimesN = np.array(dataN[\"ctime\"])\n",
-    "\n",
-    "    ctime, icomb = combine_constants(ctimesG, ctimesN)\n",
-    "\n",
-    "    cdataG_vs_time = np.array(gain_vs_time)\n",
-    "    cdataN_vs_time = np.array(dataN[\"data\"])[..., 0]\n",
-    "\n",
-    "    data_vs_time = []\n",
-    "    for iG, iN in icomb:\n",
-    "        data_vs_time.append(\n",
-    "            cdataN_vs_time[iN] * adu_to_photon / cdataG_vs_time[iG])\n",
-    "\n",
-    "    print(np.array(gain_vs_time).shape)\n",
-    "    ctime_ts = [t.timestamp() for t in ctime]\n",
-    "    ret_constants[\"Noise-e\"][qm][\"ctime\"] = ctime\n",
-    "    ret_constants[\"Noise-e\"][qm][\"data\"] = np.array(data_vs_time)\n",
-    "    # Fill missing data for compatibility with plotting code\n",
-    "    ret_constants[\"Noise-e\"][qm][\"dataBP\"] = np.array(data_vs_time)\n",
-    "    ret_constants[\"Noise-e\"][qm][\"nBP\"] = np.array(data_vs_time)\n",
-    "    \n",
-    "save_dict_to_hdf5({k:v for k,v in ret_constants.items() if k in ['Gain', 'Noise-e']},\n",
-    "                  '{}/CalDBAna_{}_Gain.h5'.format(out_folder, dclass))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Parameters for plotting\n",
-    "\n",
-    "# Define range for plotting\n",
-    "rangevals = {\n",
-    "    \"Offset\": [range_offset[0:2], range_offset[2:4]],\n",
-    "    \"Noise\": [range_noise[0:2], range_noise[2:4]],\n",
-    "    \"Gain\": [range_gain[0:2], range_gain[2:4]],\n",
-    "    \"Noise-e\": [range_noise_e[0:2], range_noise_e[2:4]],\n",
-    "    \"SlopesPC\": [range_slopesPC[0:2], range_slopesPC[2:4]],\n",
-    "    \"SlopesFF\": [range_slopesFF[0:2], range_slopesFF[2:4]]\n",
-    "}\n",
-    "\n",
-    "keys = {\n",
-    "    'Mean': ['data', '', 'Mean over pixels'],\n",
-    "    'std': ['dataStd', '', '$\\sigma$ over pixels'],\n",
-    "    'MeanBP': ['dataBP', 'Good pixels only', 'Mean over pixels'],\n",
-    "    'NBP': ['nBP', 'Fraction of BP', 'Fraction of BP'],\n",
-    "    'stdBP': ['dataBPStd', 'Good pixels only', '$\\sigma$ over pixels'],\n",
-    "    'stdASIC': ['', '', '$\\sigma$ over ASICs'],\n",
-    "    'stdCell': ['', '', '$\\sigma$ over Cells'],\n",
-    "}\n",
-    "\n",
-    "gain_name = ['High', 'Medium', 'Low']"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "print('Plot calibration constants')\n",
-    "\n",
-    "# loop over constat type\n",
-    "for const, mods in ret_constants.items():\n",
-    "\n",
-    "    # Loop over gain\n",
-    "    for gain in range(2):\n",
-    "        print('Const: {}, gain {}'.format(const, gain))\n",
-    "\n",
-    "        if const in [\"Gain\", \"Noise-e\"] and gain == 1:\n",
-    "            continue\n",
-    "        else:\n",
-    "            pass\n",
-    "\n",
-    "        # Loop over modules\n",
-    "        for mod, data in mods.items():\n",
-    "            if mod not in modules:\n",
-    "                continue\n",
-    "\n",
-    "            print(mod)\n",
-    "            ctimes = np.array(data[\"ctime\"])\n",
-    "            ctimes_ticks = [x.strftime('%y-%m-%d') for x in ctimes]\n",
-    "\n",
-    "            if (\"mdata\" in data):\n",
-    "                cmdata = np.array(data[\"mdata\"])\n",
-    "                for i, tick in enumerate(ctimes_ticks):\n",
-    "                    ctimes_ticks[i] = ctimes_ticks[i] + \\\n",
-    "                        ', A={}'.format(cmdata[i].get('Acquisition rate', None)) + \\\n",
-    "                        ', M={:1.0f}'.format(\n",
-    "                        cmdata[i]['Memory cells'])\n",
-    "\n",
-    "            sort_ind = np.argsort(ctimes_ticks)\n",
-    "            ctimes_ticks = list(np.array(ctimes_ticks)[sort_ind])\n",
-    "\n",
-    "            # Create sorted by data dataset\n",
-    "            rdata = {}\n",
-    "            for key, item in keys.items():\n",
-    "                if item[0] in data:\n",
-    "                    rdata[key] = np.array(data[item[0]])[sort_ind]\n",
-    "\n",
-    "            nTimes = rdata['Mean'].shape[0]\n",
-    "            nPixels = rdata['Mean'].shape[1] * rdata['Mean'].shape[2]\n",
-    "            nBins = nMemToShow * nPixels\n",
-    "\n",
-    "            # Select gain\n",
-    "            if const not in [\"Gain\", \"Noise-e\"]:\n",
-    "                for key in rdata:\n",
-    "                    rdata[key] = rdata[key][..., gain]\n",
-    "\n",
-    "            # Avoid to low values\n",
-    "            if const in [\"Noise\", \"Offset\", \"Noise-e\"]:\n",
-    "                rdata['Mean'][rdata['Mean'] < 0.1] = np.nan\n",
-    "                if 'MeanBP' in rdata:\n",
-    "                    rdata['MeanBP'][rdata['MeanBP'] < 0.1] = np.nan\n",
-    "\n",
-    "            if 'NBP' in rdata:\n",
-    "                rdata['NBP'] = rdata['NBP'].astype(float)\n",
-    "                rdata[\"NBP\"][rdata[\"NBP\"] == (spShape[0] * spShape[1])] = np.nan\n",
-    "                rdata[\"NBP\"] = rdata[\"NBP\"] / (spShape[0] * spShape[1]) * 100\n",
-    "\n",
-    "            # Reshape: ASICs over cells for plotting\n",
-    "            pdata = {}\n",
-    "            for key in rdata:\n",
-    "                if len(rdata[key].shape)<3:\n",
-    "                    continue\n",
-    "                pdata[key] = rdata[key][:, :, :, :nMemToShow].reshape(\n",
-    "                    nTimes, nBins).swapaxes(0, 1)\n",
-    "\n",
-    "            # Summary over ASICs\n",
-    "            adata = {}\n",
-    "            for key in rdata:\n",
-    "                if len(rdata[key].shape)<3:\n",
-    "                    continue\n",
-    "                adata[key] = np.nanmean(rdata[key], axis=(1, 2)).swapaxes(0, 1)\n",
-    "\n",
-    "            # Plotting\n",
-    "            for key in pdata:\n",
-    "                vmin,vmax = get_range(pdata[key][::-1], plot_range)\n",
-    "                if const in rangevals and key in ['Mean', 'MeanBP']:\n",
-    "                    vmin = rangevals[const][gain][0]\n",
-    "                    vmax = rangevals[const][gain][1]\n",
-    "\n",
-    "                if key == 'NBP':\n",
-    "                    unit = '[%]'\n",
-    "                else:\n",
-    "                    unit = '[ADU]'\n",
-    "                    if const == 'Noise-e':\n",
-    "                        unit = '[$e^-$]'\n",
-    "\n",
-    "                title = '{}, module {}, {} gain, {}'.format(\n",
-    "                    const, mod, gain_name[gain], keys[key][1])\n",
-    "                cb_label = '{}, {} {}'.format(const, keys[key][2], unit)\n",
-    "\n",
-    "                hm_combine(pdata[key][::-1], htype=HMType.INSET_AXIS,\n",
-    "                          x_label='Creation Time', y_label='ASIC ID',\n",
-    "                          x_ticklabels=ctimes_ticks,\n",
-    "                          x_ticks=np.arange(len(ctimes_ticks))+0.3,\n",
-    "                          title=title, cb_label=cb_label,\n",
-    "                          vmin=vmin, vmax=vmax,\n",
-    "                          fname='{}/{}_{}_g{}_ASIC_{}.png'.format(\n",
-    "                                  out_folder, const, mod, gain, key),\n",
-    "                          y_ticks=np.arange(nBins, step=nMemToShow)+16,\n",
-    "                          y_ticklabels=np.arange(nPixels)[::-1]+1,\n",
-    "                          pad=[0.125, 0.125, 0.12, 0.185])\n",
-    "\n",
-    "                hm_combine(adata[key],\n",
-    "                          x_label='Creation Time', y_label='Memory cell ID',\n",
-    "                          x_ticklabels=ctimes_ticks,\n",
-    "                          x_ticks=np.arange(len(ctimes_ticks))+0.3,\n",
-    "                          title=title, cb_label=cb_label,\n",
-    "                          fname='{}/{}_{}_g{}_MEM_{}.png'.format(\n",
-    "                                  out_folder, const, mod, gain, key),\n",
-    "                          vmin=vmin, vmax=vmax)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC.ipynb b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC.ipynb
deleted file mode 100644
index 6ee85833c..000000000
--- a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC.ipynb
+++ /dev/null
@@ -1,574 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# FastCCD Dark Characterization\n",
-    "\n",
-    "Author: I. Klačková, S. Hauf, Version 1.0\n",
-    "\n",
-    "The following notebook provides dark image analysis of the FastCCD detector.\n",
-    "\n",
-    "Dark characterization evaluates offset and noise of the detector and gives information about bad pixels. Resulting maps are saved as .h5 files for a latter use."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:38.999974Z",
-     "start_time": "2018-12-06T10:54:38.983406Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "in_folder = \"/gpfs/exfel/exp/SCS/201930/p900074/raw/\" # input folder, required\n",
-    "out_folder = 'gpfs/exfel/data/scratch/haufs/test/' # output folder, required\n",
-    "path_template = 'RAW-R{:04d}-DA05-S{{:05d}}.h5' # the template to use to access data\n",
-    "run = 321 # which run to read data from, required\n",
-    "number_dark_frames = 0 # number of images to be used, if set to 0 all available images are used\n",
-    "cluster_profile = \"noDB\" # ipcluster profile to use\n",
-    "operation_mode = \"FF\" #o r \"FF\". FS stands for frame-store and FF for full-frame opeartion\n",
-    "sigma_noise = 10. # Pixel exceeding 'sigmaNoise' * noise value in that pixel will be masked\n",
-    "h5path = '/INSTRUMENT/SCS_CDIDET_FCCD2M/DAQ/FCCD:daqOutput/data/image/pixels' # path in the HDF5 file the data is at\n",
-    "h5path_t = '/CONTROL/SCS_CDIDET_FCCD2M/CTRL/LSLAN/inputA/crdg/value'  # path to find temperature at\n",
-    "h5path_cntrl = '/RUN/SCS_CDIDET_FCCD2M/DET/FCCD'  # path to control data\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # calibration DB interface to use\n",
-    "local_output = False # output also in as H5 files\n",
-    "temp_limits = 5 # limits within which temperature is considered the same\n",
-    "sequence = 0 # sequence file to use\n",
-    "multi_iteration = False # use multiple iterations\n",
-    "use_dir_creation_date = True # use dir creation date\n",
-    "bad_pixel_offset_sigma = 5. # offset standard deviations above which to consider pixel bad \n",
-    "bad_pixel_noise_sigma = 5. # noise standard deviations above which to consider pixel bad \n",
-    "fix_temperature = 0. # fix temperature to this value, set to 0 to use slow control value"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:39.190907Z",
-     "start_time": "2018-12-06T10:54:39.186154Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
-    "from iCalibrationDB.detectors import DetectorTypes"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:39.467334Z",
-     "start_time": "2018-12-06T10:54:39.427784Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "import XFELDetAna.xfelprofiler as xprof\n",
-    "\n",
-    "profiler = xprof.Profiler()\n",
-    "profiler.disable()\n",
-    "from XFELDetAna.util import env\n",
-    "env.iprofile = cluster_profile\n",
-    "\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "from XFELDetAna import xfelpycaltools as xcal\n",
-    "from XFELDetAna import xfelpyanatools as xana\n",
-    "from XFELDetAna.plotting.util import prettyPlotting\n",
-    "prettyPlotting=True\n",
-    "from XFELDetAna.xfelreaders import ChunkReader\n",
-    "from XFELDetAna.detectors.fastccd import readerh5 as fastccdreaderh5\n",
-    "from cal_tools.tools import get_dir_creation_date\n",
-    "\n",
-    "import numpy as np\n",
-    "import h5py\n",
-    "import matplotlib.pyplot as plt\n",
-    "from iminuit import Minuit\n",
-    "\n",
-    "import time\n",
-    "import copy\n",
-    "\n",
-    "from prettytable import PrettyTable\n",
-    "\n",
-    "%matplotlib inline\n",
-    "\n",
-    "def nImagesOrLimit(nImages, limit):\n",
-    "    if limit == 0:\n",
-    "        return nImages\n",
-    "    else:\n",
-    "        return min(nImages, limit)\n",
-    "    \n",
-    "sigmaNoise = sigma_noise"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
-    "file_loc = 'proposal:{} runs:{}'.format(proposal, run)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:40.058101Z",
-     "start_time": "2018-12-06T10:54:40.042615Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "if operation_mode == \"FS\":\n",
-    "    x = 960 # rows of the FastCCD to analyze in FS mode \n",
-    "    y = 960 # columns of the FastCCD to analyze in FS mode \n",
-    "    print('\\nYou are analyzing data in FS mode.')\n",
-    "else:\n",
-    "    x = 1934 # rows of the FastCCD to analyze in FF mode \n",
-    "    y = 960 # columns of the FastCCD to analyze in FF mode\n",
-    "    print('\\nYou are analyzing data in FF mode.\\n')\n",
-    "    \n",
-    "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
-    "fp_name = path_template.format(run)\n",
-    "\n",
-    "import datetime\n",
-    "creation_time = None\n",
-    "if use_dir_creation_date:\n",
-    "    creation_time = get_dir_creation_date(in_folder, run)\n",
-    "\n",
-    "fp_path = '{}/{}'.format(ped_dir, fp_name)\n",
-    "\n",
-    "print(\"Reading data from: {}\\n\".format(fp_path))\n",
-    "print(\"Run is: {}\".format(run))\n",
-    "print(\"HDF5 path: {}\".format(h5path))\n",
-    "if creation_time:\n",
-    "    print(\"Using {} as creation time\".format(creation_time.isoformat()))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:40.555804Z",
-     "start_time": "2018-12-06T10:54:40.452978Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "filename = fp_path.format(sequence)\n",
-    "sensorSize = [x, y]\n",
-    "chunkSize = 100 #Number of images to read per chunk\n",
-    "#Sensor area will be analysed according to blocksize\n",
-    "blockSize = [sensorSize[0]//2, sensorSize[1]//4] \n",
-    "xcal.defaultBlockSize = blockSize\n",
-    "cpuCores = 8 #Specifies the number of running cpu cores\n",
-    "memoryCells = 1 #FastCCD has 1 memory cell\n",
-    "#Specifies total number of images to proceed\n",
-    "nImages = fastccdreaderh5.getDataSize(filename, h5path)[0] \n",
-    "nImages = nImagesOrLimit(nImages, number_dark_frames)\n",
-    "print(\"\\nNumber of dark images to analyze: \",nImages)\n",
-    "commonModeBlockSize = blockSize\n",
-    "commonModeAxisR = 'row'#Axis along which common mode will be calculated\n",
-    "run_parallel = True\n",
-    "profile = False\n",
-    "\n",
-    "with h5py.File(filename, 'r') as f:\n",
-    "    bias_voltage = int(f['{}/biasclock/bias/value'.format(h5path_cntrl)][0])\n",
-    "    det_gain = int(f['{}/exposure/gain/value'.format(h5path_cntrl)][0])\n",
-    "    integration_time = int(f['{}/acquisitionTime/value'.format(h5path_cntrl)][0])\n",
-    "    temperature = np.mean(f[h5path_t])\n",
-    "    temperature_k = temperature + 273.15\n",
-    "        \n",
-    "    if fix_temperature != 0.:\n",
-    "        temperature_k = fix_temperature\n",
-    "        print(\"Using fixed temperature\")\n",
-    "    print(\"Bias voltage is {} V\".format(bias_voltage))\n",
-    "    print(\"Detector gain is set to x{}\".format(det_gain))\n",
-    "    print(\"Detector integration time is set to {}\".format(integration_time))\n",
-    "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K\".format(temperature, temperature_k))\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:41.584031Z",
-     "start_time": "2018-12-06T10:54:41.578462Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "reader = ChunkReader(filename, fastccdreaderh5.readData, \n",
-    "                     nImages, chunkSize, \n",
-    "                     path = h5path, \n",
-    "                     pixels_x = sensorSize[0],\n",
-    "                     pixels_y = sensorSize[1],)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:41.899511Z",
-     "start_time": "2018-12-06T10:54:41.864816Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "noiseCal = xcal.NoiseCalculator(sensorSize, memoryCells, \n",
-    "                                cores=cpuCores, blockSize=blockSize,\n",
-    "                               runParallel=run_parallel)\n",
-    "histCalRaw = xcal.HistogramCalculator(sensorSize, bins=1000, \n",
-    "                                      range=[0, 10000], parallel=False, \n",
-    "                                      memoryCells=memoryCells, \n",
-    "                                      cores=cpuCores, blockSize=blockSize)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### First Iteration"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Characterization of dark images with purpose to create dark maps (offset, noise and bad pixel maps) is an iterative process. Firstly, initial offset and noise maps are produced from raw dark data."
-   ]
-  },
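A minimal sketch of that first iteration, assuming a hypothetical `raw_frames` array with the frames stacked along the last axis (the notebook's `NoiseCalculator` performs the equivalent bookkeeping chunk by chunk)::

    import numpy as np

    # Hypothetical stack of dark frames, shape (rows, columns, n_frames)
    raw_frames = np.random.normal(loc=3800, scale=6, size=(1934, 960, 50)).astype(np.float32)

    offset_map = raw_frames.mean(axis=2)  # per-pixel offset
    noise_map = raw_frames.std(axis=2)    # per-pixel noise

    # A second iteration would subtract offset_map from the raw frames and
    # recompute the noise on the offset-corrected data.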
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:55:21.238009Z",
-     "start_time": "2018-12-06T10:54:54.586435Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "for data in reader.readChunks():\n",
-    "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
-    "    dx = np.count_nonzero(data, axis=(0, 1))\n",
-    "    data = data[:,:,dx != 0]\n",
-    "    histCalRaw.fill(data)\n",
-    "    #Filling calculators with data\n",
-    "    noiseCal.fill(data)\n",
-    "          \n",
-    "offsetMap = noiseCal.getOffset() #Produce offset map\n",
-    "noiseMap = noiseCal.get() #Produce noise map\n",
-    "noiseCal.reset() #Reset noise calculator\n",
-    "print(\"Initial maps were created\")"
-   ]
-  },
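The `0b0011111111111111` mask above keeps the lower 14 bits of each 16-bit raw word; the upper two bits are understood here to carry gain rather than signal information, which should be treated as an assumption about the FastCCD data format rather than a documented fact. A self-contained illustration::

    import numpy as np

    raw = np.array([0b1100000000001010], dtype=np.uint16)  # two top bits set, signal = 10
    signal = np.bitwise_and(raw, 0b0011111111111111)       # strip the two top bits
    gain_bits = raw >> 14                                  # 0b11 == 3 in this example
    print(signal, gain_bits)                               # [10] [3]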
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:56:20.686534Z",
-     "start_time": "2018-12-06T10:56:11.721829Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "#**************OFFSET MAP HISTOGRAM***********#\n",
-    "ho,co = np.histogram(offsetMap.flatten(), bins=700)\n",
-    "\n",
-    "do = {'x': co[:-1],\n",
-    "     'y': ho,\n",
-    "     'y_err': np.sqrt(ho[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(do, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Offset (ADU)', \n",
-    "                      y_label=\"Counts\", y_log=True,\n",
-    "                      )\n",
-    "                      \n",
-    "\n",
-    "#*****NOISE MAP HISTOGRAM FROM THE OFFSET CORRECTED DATA*******#\n",
-    "hn,cn = np.histogram(noiseMap.flatten(), bins=200)\n",
-    "\n",
-    "dn = {'x': cn[:-1],\n",
-    "     'y': hn,\n",
-    "     'y_err': np.sqrt(hn[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(dn, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Noise (ADU)', \n",
-    "                      y_label=\"Counts\", \n",
-    "                      y_log=True)\n",
-    "\n",
-    "\n",
-    "#**************HEAT MAPS*******************#\n",
-    "fig = xana.heatmapPlot(offsetMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Offset (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmin=3000, vmax=4500)\n",
-    "\n",
-    "fig = xana.heatmapPlot(noiseMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Noise (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmax=2*np.mean(noiseMap))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:56:22.741284Z",
-     "start_time": "2018-12-06T10:56:20.688393Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "\n",
-    "## offset\n",
-    "\n",
-    "metadata = ConstantMetaData()\n",
-    "offset = Constants.CCD(DetectorTypes.fastCCD).Offset()\n",
-    "offset.data = offsetMap.data\n",
-    "metadata.calibration_constant = offset\n",
-    "\n",
-    "# set the operating condition\n",
-    "condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                integration_time=integration_time,\n",
-    "                                gain_setting=det_gain,\n",
-    "                                temperature=temperature_k,\n",
-    "                                pixels_x=1934,\n",
-    "                                pixels_y=960)\n",
-    "for parm in condition.parameters:\n",
-    "    if parm.name == \"Sensor Temperature\":\n",
-    "        parm.lower_deviation = temp_limits\n",
-    "        parm.upper_deviation = temp_limits\n",
-    "\n",
-    "device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "metadata.detector_condition = condition\n",
-    "\n",
-    "# specify the version for this constant\n",
-    "if creation_time is None:\n",
-    "    metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "else:\n",
-    "    metadata.calibration_constant_version = Versions.Timespan(device=device, start=creation_time)\n",
-    "metadata.calibration_constant_version.raw_data_location = file_loc\n",
-    "metadata.send(cal_db_interface)\n",
-    "\n",
-    "## noise\n",
-    "\n",
-    "metadata = ConstantMetaData()\n",
-    "noise = Constants.CCD(DetectorTypes.fastCCD).Noise()\n",
-    "noise.data = noiseMap.data\n",
-    "metadata.calibration_constant = noise\n",
-    "\n",
-    "# set the operating condition\n",
-    "condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                integration_time=integration_time,\n",
-    "                                gain_setting=det_gain,\n",
-    "                                temperature=temperature_k,\n",
-    "                                pixels_x=1934,\n",
-    "                                pixels_y=960)\n",
-    "\n",
-    "for parm in condition.parameters:\n",
-    "    if parm.name == \"Sensor Temperature\":\n",
-    "        parm.lower_deviation = temp_limits\n",
-    "        parm.upper_deviation = temp_limits\n",
-    "\n",
-    "\n",
-    "device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "metadata.detector_condition = condition\n",
-    "\n",
-    "# specify the a version for this constant\n",
-    "if creation_time is None:\n",
-    "    metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "else:\n",
-    "    metadata.calibration_constant_version = Versions.Timespan(device=device, start=creation_time)\n",
-    "metadata.calibration_constant_version.raw_data_location = file_loc\n",
-    "metadata.send(cal_db_interface)\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from cal_tools.enums import BadPixels\n",
-    "bad_pixels = np.zeros(offsetMap.shape, np.uint32)\n",
-    "mnoffset = np.nanmedian(offsetMap)\n",
-    "stdoffset = np.nanstd(offsetMap)\n",
-    "bad_pixels[(offsetMap < mnoffset-bad_pixel_offset_sigma*stdoffset) | \n",
-    "           (offsetMap > mnoffset+bad_pixel_offset_sigma*stdoffset)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
-    "\n",
-    "mnnoise = np.nanmedian(noiseMap)\n",
-    "stdnoise = np.nanstd(noiseMap)\n",
-    "bad_pixels[(noiseMap < mnnoise-bad_pixel_noise_sigma*stdnoise) | \n",
-    "           (noiseMap > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
-    "\n",
-    "fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]),\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Bad Pixel Value (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmin=0, vmax=32)"
-   ]
-  },
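`BadPixels` values are bit flags (powers of two), so several defect classes can be combined in one 32-bit word with `np.bitwise_or`; plotting `np.log2` of the map then recovers the bit position of a single flag. A small sketch with assumed flag values, since the real ones live in `cal_tools.enums`::

    import numpy as np

    OFFSET_OUT_OF_THRESHOLD = np.uint32(1 << 0)  # assumed values, for illustration only
    NOISE_OUT_OF_THRESHOLD = np.uint32(1 << 1)

    pixel = OFFSET_OUT_OF_THRESHOLD | NOISE_OUT_OF_THRESHOLD
    print(bin(pixel))             # 0b11: both defect classes recorded
    print(np.log2(float(pixel)))  # non-integer (~1.58) flags a multiply-defective pixel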
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "metadata = ConstantMetaData()\n",
-    "badpix = Constants.CCD(DetectorTypes.fastCCD).BadPixelsDark()\n",
-    "badpix.data = bad_pixels.data\n",
-    "metadata.calibration_constant = badpix\n",
-    "\n",
-    "# set the operating condition\n",
-    "condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                integration_time=integration_time,\n",
-    "                                gain_setting=det_gain,\n",
-    "                                temperature=temperature_k,\n",
-    "                                pixels_x=1934,\n",
-    "                                pixels_y=960)\n",
-    "\n",
-    "for parm in condition.parameters:\n",
-    "    if parm.name == \"Sensor Temperature\":\n",
-    "        parm.lower_deviation = temp_limits\n",
-    "        parm.upper_deviation = temp_limits\n",
-    "\n",
-    "\n",
-    "device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "metadata.detector_condition = condition\n",
-    "\n",
-    "# specify the a version for this constant\n",
-    "if creation_time is None:\n",
-    "    metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "else:\n",
-    "    metadata.calibration_constant_version = Versions.Timespan(device=device, start=creation_time)\n",
-    "metadata.calibration_constant_version.raw_data_location = file_loc\n",
-    "metadata.send(cal_db_interface)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "histCalCorr = xcal.HistogramCalculator(sensorSize, bins=200, \n",
-    "                                      range=[-200, 200], parallel=False, \n",
-    "                                      memoryCells=memoryCells, \n",
-    "                                      cores=cpuCores, blockSize=blockSize)\n",
-    "\n",
-    "\n",
-    "for data in reader.readChunks():\n",
-    "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
-    "    data -= offsetMap.data\n",
-    "    histCalCorr.fill(data)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ho,eo,co,so = histCalCorr.get()\n",
-    "\n",
-    "\n",
-    "d = [{'x': co,\n",
-    "      'y': ho,\n",
-    "      'y_err': np.sqrt(ho[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Offset corr.'\n",
-    "     },\n",
-    "    \n",
-    "     ]\n",
-    "     \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy(ADU)', \n",
-    "                      y_label='Number of occurrences', figsize='2col',\n",
-    "                      y_log=True, x_range=(-50,500),\n",
-    "                      legend='top-center-frame-2col')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.7"
-  },
-  "latex_envs": {
-   "LaTeX_envs_menu_present": true,
-   "autocomplete": true,
-   "bibliofile": "biblio.bib",
-   "cite_by": "apalike",
-   "current_citInitial": 1,
-   "eqLabelWithNumbers": true,
-   "eqNumInitial": 1,
-   "hotkeys": {
-    "equation": "Ctrl-E",
-    "itemize": "Ctrl-I"
-   },
-   "labels_anchors": false,
-   "latex_user_defs": false,
-   "report_style_numbering": false,
-   "user_envs_cfg": false
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
diff --git a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
index 5a3f2c965..0b56642e1 100644
--- a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
+++ b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
@@ -16,12 +16,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:38.999974Z",
-     "start_time": "2018-12-06T10:54:38.983406Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Initial Parameters:\n",
@@ -46,13 +41,14 @@
     "bad_pixel_offset_sigma = 5. # Any pixel whose offset is beyond 5 standard deviations, is a bad pixel\n",
     "bad_pixel_noise_sigma = 5. # Any pixel whose noise is beyond 5 standard deviations, is a bad pixel\n",
     "sigmaNoise = 5. # Any pixel whose signal exceeds 'sigmaNoise'*noiseCM (common mode corrected noise) will be masked\n",
-    "fix_temperature = 0. # Fixed operation temperature in Kelvins. If set to 0, mean value of the data file's temperature is used.\n",
+    "fix_temperature = 0. # Fixed operation temperature in Kelvins. If set to 0, mean value of the data file's temperature is \n",
+    "                     # used.\n",
     "chunkSize = 100 # Number of images to read per chunk\n",
     "cpuCores = 40 # Specifies the number of running cpu cores\n",
     "commonModeAxis = 1 # Axis along which common mode will be calculated (0: along rows, 1: along columns)\n",
-    "ADU_to_electron_upper = 6.1 # According to Table 6.1 of Ivana Klačková's master's thesis, for upper hemisphere: conversion\n",
-    "                            # gain is 1 ADU = 6.1e-\n",
-    "ADU_to_electron_lower = 6.2 # and for lower hemisphere: conversion gain is 1 ADU = 6.2e-\n",
+    "ADU_to_electron_upper_hg = 6.1 # According to Table 6.1 of Ivana Klačková's master's thesis, for upper hemisphere and \n",
+    "                               # high gain: conversion gain is 1 ADU = 6.1e-\n",
+    "ADU_to_electron_lower_hg = 6.2 # and for lower hemisphere and high gain: conversion gain is 1 ADU = 6.2e- \n",
     "run_parallel = True # For parallel computation \n",
     "db_output = True # Output constants to the calibration database"
    ]
@@ -112,8 +108,20 @@
    "source": [
     "# Output Folder Creation:\n",
     "if not os.path.exists(out_folder):\n",
-    "    os.makedirs(out_folder)\n",
-    "\n",
+    "    os.makedirs(out_folder)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-12-06T10:54:39.467334Z",
+     "start_time": "2018-12-06T10:54:39.427784Z"
+    }
+   },
+   "outputs": [],
+   "source": [
     "# Number of Images:\n",
     "def nImagesOrLimit(nImages, limit):\n",
     "    if limit == 0:\n",
@@ -193,13 +201,13 @@
     "nImages = fastccdreaderh5.getDataSize(filename, h5path)[0] # Specifies total number of images to proceed\n",
     "nImages = nImagesOrLimit(nImages, number_dark_frames)\n",
     "profile = False\n",
-    "gain_setting = None\n",
     "\n",
     "with h5py.File(filename, 'r') as f:\n",
     "    bias_voltage = int(f['{}/biasclock/bias/value'.format(h5path_cntrl)][0])\n",
     "    det_gain = int(f['{}/exposure/gain/value'.format(h5path_cntrl)][0])\n",
     "    integration_time = int(f['{}/exposure/exposure_time/value'.format(h5path_cntrl)][0])\n",
-    "    temperature = np.mean(f[h5path_t])"
+    "    temperature = np.mean(f[h5path_t])\n",
+    "    temperature = round(temperature, 2)"
    ]
   },
   {
@@ -216,20 +224,23 @@
     "# Printing the Parameters Read from the Data File:\n",
     "\n",
     "display(Markdown('### Evaluated Parameters'))\n",
-    "print(\"Number of dark images to analyze:\",nImages)   \n",
-    "\n",
-    "if det_gain == 8:\n",
-    "    gain_setting = \"high\"\n",
-    "elif det_gain == 2:\n",
-    "    gain_setting = \"medium\"\n",
-    "elif det_gain == 1:\n",
-    "    gain_setting = \"low\"\n",
-    "else:\n",
-    "    gain_setting = \"auto\"\n",
-    "\n",
-    "print(\"Bias voltage is {} V\".format(bias_voltage))\n",
-    "print(\"Detector gain is set to x{}\".format(det_gain), \"({} gain)\".format(gain_setting))\n",
-    "print(\"Detector integration time is set to {}\".format(integration_time), 'ms')\n",
+    "print(\"Number of dark images to analyze:\", nImages)   \n",
+    "\n",
+    "gain_dict = {\n",
+    "        \"high gain\" : 8,\n",
+    "        \"medium gain\" : 2,\n",
+    "        \"low gain\" : 1,\n",
+    "        \"auto gain\" : 0\n",
+    "    }\n",
+    "\n",
+    "for gain, value in gain_dict.items():   \n",
+    "    if det_gain == value:\n",
+    "        gain_setting = gain\n",
+    "                    \n",
+    "print(\"Bias voltage is {} V.\".format(bias_voltage))\n",
+    "print(\"Detector gain is set to x{} ({}).\".format(det_gain, gain_setting))\n",
+    "print(\"Detector integration time is set to {}\".format(integration_time), 'ms.') \n",
+    " \n",
     "\n",
     "if fix_temperature != 0.:\n",
     "    print(\"Using a fixed temperature of {} K\".format(fix_temperature))\n",
@@ -238,9 +249,7 @@
     "    # calibration constant to the DB later\n",
     "    fix_temperature = temperature + 273.15\n",
     "    print(\"Temperature is not fixed.\")\n",
-    "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K\".format(temperature, fix_temperature))\n",
-    "\n",
-    "print(\"Output: {}\".format(out_folder))"
+    "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K\".format(temperature, fix_temperature))"
    ]
   },
   {
@@ -300,12 +309,43 @@
    },
    "outputs": [],
    "source": [
+    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
+    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
+    "chunkSize_new = 0 # See below\n",
+    "\n",
     "for data in reader.readChunks():\n",
     "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
     "    dx = np.count_nonzero(data, axis=(0, 1))\n",
     "    data = data[:,:,dx != 0]\n",
+    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will temporarily \n",
+    "    # change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
+    "    if data.shape[2] < chunkSize:\n",
+    "        chunkSize_new = data.shape[2]\n",
+    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
+    "              .format(chunkSize_new))\n",
+    "        images = images + chunkSize_new\n",
+    "        counter2 += 1 \n",
+    "    else:\n",
+    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
+    "        counter1 += 1\n",
+    "             \n",
     "    noiseCal.fill(data) # Filling calculators with data\n",
-    "          \n",
+    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
+    "\n",
+    "print('A total number of {} images are processed.'.format(images))"
+   ]
+  },
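The counter arithmetic above assumes that all partial chunks share the same size; a plain accumulator, shown here as a hedged alternative under the same assumptions about `reader.readChunks()`, gives the same total without that constraint::

    total_images = 0
    for data in reader.readChunks():
        data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)
        dx = np.count_nonzero(data, axis=(0, 1))
        data = data[:, :, dx != 0]
        total_images += data.shape[2]  # counts full and partial chunks alike
        noiseCal.fill(data)
    print('A total of {} images were processed.'.format(total_images))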
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-12-06T10:55:21.238009Z",
+     "start_time": "2018-12-06T10:54:54.586435Z"
+    }
+   },
+   "outputs": [],
+   "source": [
     "offsetMap = noiseCal.getOffset() # Producing offset map\n",
     "noiseMap = noiseCal.get() # Producing noise map\n",
     "noiseCal.reset() # Resetting noise calculator\n",
@@ -328,7 +368,7 @@
    "outputs": [],
    "source": [
     "#************** OFFSET MAP HISTOGRAM ***********#\n",
-    "ho,co = np.histogram(offsetMap.flatten(), bins=700) # ho = offset histogram; co = offset bin centers\n",
+    "ho, co = np.histogram(offsetMap.flatten(), bins=700) # ho = offset histogram; co = offset bin centers\n",
     "do = {'x': co[:-1],\n",
     "     'y': ho,\n",
     "     'y_err': np.sqrt(ho[:]),\n",
@@ -337,18 +377,19 @@
     "     'label': 'Raw Signal (ADU)'\n",
     "     }\n",
     "fig = xana.simplePlot(do, figsize='1col', aspect=1, x_label = 'Raw Signal (ADU)', y_label=\"Counts\", \n",
-    "                      x_range = (3400,4000), title = 'Offset Histogram')\n",
+    "                      x_range = (3400, 4400), title = 'Offset Histogram')\n",
     "#fig.savefig('Offset_Hist.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Raw Signal\"\n",
-    "t0.field_names = [\"Mean\",\"Median\", \"Standard Deviation\"]\n",
-    "t0.add_row([\"{:0.3f} (ADU)\".format(np.mean(data)), \"{:0.3f} (ADU)\".format(np.median(data)), \"{:0.3f} (ADU)\".format(np.std(data))])\n",
+    "t0.field_names = [\"Mean\", \"Median\", \"Standard Deviation\"]\n",
+    "t0.add_row([\"{:0.3f} (ADU)\".format(np.mean(data)), \"{:0.3f} (ADU)\".format(np.median(data)), \"{:0.3f} (ADU)\"\n",
+    "            .format(np.std(data))])\n",
     "print(t0,'\\n')\n",
     "\n",
     "#************** OffsetMAP *******************#\n",
     "fig = xana.heatmapPlot(offsetMap[:,:,0], x_label='Column Number', y_label='Row Number',  aspect=1,\n",
-    "                       x_range=(0,y), y_range=(0,x), vmin=3000, vmax=4300, lut_label='Offset (ADU)', \n",
+    "                       x_range=(0, y), y_range=(0, x), vmin=3000, vmax=4300, lut_label='Offset (ADU)', \n",
     "                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)', \n",
     "                       panel_top_low_lim = 3000, panel_top_high_lim = 4500, panel_side_low_lim = 3000, \n",
     "                       panel_side_high_lim = 5000, title = 'OffsetMap')\n",
@@ -356,8 +397,8 @@
     "\n",
     "#************** Raw NoiseMAP *******************#\n",
     "fig = xana.heatmapPlot(noiseMap[:,:,0], x_label='Column Number', y_label='Row Number', aspect=1,\n",
-    "                       lut_label='Uncorrected Noise (ADU)', x_range=(0,y),\n",
-    "                       y_range=(0,x), vmax=2*np.mean(noiseMap), panel_x_label='Columns Stat (ADU)', \n",
+    "                       lut_label='Uncorrected Noise (ADU)', x_range=(0, y),\n",
+    "                       y_range=(0, x), vmax=2*np.mean(noiseMap), panel_x_label='Columns Stat (ADU)', \n",
     "                       panel_y_label='Rows Stat (ADU)', panel_top_low_lim = 0, panel_top_high_lim = 20, \n",
     "                       panel_side_low_lim = 0, panel_side_high_lim = 50, title = 'Uncorrected NoiseMap')\n",
     "#fig.savefig('RawNoiseMap.pdf', format='pdf', dpi=400, bbox_inches='tight')"
@@ -379,7 +420,7 @@
     "# Common Mode Correction:\n",
     "# This is the new method subtracting the median of all pixels that are read out at the same time along a row:\n",
     "cmCorrection = xcal.CommonModeCorrection([data.shape[0], data.shape[1]], [data.shape[0]//2, data.shape[1]], \n",
-    "                                         commonModeAxis, parallel=False, dType=np.float32, stride=10,\n",
+    "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=10,\n",
     "                                         noiseMap=noiseMap.astype(np.float32), minFrac=0)\n",
     "\n",
     "cmCorrection.debug()"
@@ -394,10 +435,10 @@
     "# Histogram Calculators:\n",
     "\n",
     "# For offset corrected data:\n",
-    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=600, range=[-200, 200], memoryCells=memoryCells, \n",
+    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=400, range=[-200, 200], memoryCells=memoryCells, \n",
     "                                            cores=cpuCores, gains=None, blockSize=blockSize)\n",
     "# For common mode corrected data:\n",
-    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=600, range=[-200, 200], memoryCells=memoryCells, \n",
+    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=400, range=[-200, 200], memoryCells=memoryCells, \n",
     "                                              cores=cpuCores, gains=None, blockSize=blockSize)"
    ]
   },
@@ -416,11 +457,27 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
+    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
+    "chunkSize_new = 0 # See below\n",
+    "\n",
     "for data in reader.readChunks():\n",
     "    \n",
-    "    data = data.astype(np.float32)\n",
+    "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
     "    dx = np.count_nonzero(data, axis=(0, 1))\n",
     "    data = data[:,:,dx != 0] \n",
+    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will temporarily \n",
+    "    # change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
+    "    if data.shape[2] < chunkSize:\n",
+    "        chunkSize_new = data.shape[2]\n",
+    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
+    "              .format(chunkSize_new))\n",
+    "        images = images + chunkSize_new\n",
+    "        counter2 += 1 \n",
+    "    else:\n",
+    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
+    "        counter1 += 1\n",
+    "    \n",
     "    data = offsetCorrection.correct(data) # Offset correction\n",
     "    offset_corr_data = copy.copy(data) # I am copying this so that I can have access to it in the table below \n",
     "    histCalCorrected.fill(data)\n",
@@ -428,10 +485,9 @@
     "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable) # Common mode correction\n",
     "    histCalCMCorrected.fill(data)\n",
     "    noiseCal.fill(data)  # Filling noise calculator with common mode (CM) corrected data\n",
-    "    \n",
-    "noiseMapCM = noiseCal.get() # Produces CM corrected noise map\n",
-    "ho, eo, co , so = histCalCorrected.get()\n",
-    "hCM, eCM, cCM ,sCM = histCalCMCorrected.get()\n",
+    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
+    "\n",
+    "print('A total number of {} images are processed.'.format(images))\n",
     "print(\"Offset and common mode corrections are applied.\")"
    ]
   },
@@ -441,7 +497,18 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# I am copying these so that I can replot them later after the calculators are reset:\n",
+    "noiseMapCM = noiseCal.get() # Produces CM corrected noise map\n",
+    "ho, eo, co, so = histCalCorrected.get()\n",
+    "hCM, eCM, cCM, sCM = histCalCMCorrected.get()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# We are copying these so that we can replot them later after the calculators are reset:\n",
     "\n",
     "ho_second_trial = copy.copy(ho)\n",
     "co_second_trial = copy.copy(co)\n",
@@ -480,15 +547,17 @@
     "     }]\n",
     "      \n",
     "fig = xana.simplePlot(do, figsize='2col', aspect=1, x_label = 'Corrected Signal (ADU)', y_label=\"Counts\", \n",
-    "                      x_range = (-20,20), legend='top-right-frame-1col', title = 'Corrected Signal - 2nd Iteration')\n",
+    "                      x_range = (-20, 20), legend='top-right-frame-1col', title = 'Corrected Signal - 2nd Iteration')\n",
     "#fig.savefig('Corrected_Signal_Hist_1.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "t0 = PrettyTable()\n",
-    "t0.title = \"Comparison of the First Round of Corrections - Bad Pixels Included\"\n",
-    "t0.field_names = [\"After Offset Correction\",\"After Common Mode Correction\"]\n",
+    "t0.title = \"Comparison of the First Round of Corrections - Bad Pixels Not Excluded\"\n",
+    "t0.field_names = [\"After Offset Correction\", \"After Common Mode Correction\"]\n",
     "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.mean(offset_corr_data)), \"Mean: {:0.3f} (ADU)\".format(np.mean(data))])\n",
-    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.median(offset_corr_data)), \"Median: {:0.3f} (ADU)\".format(np.median(data))])\n",
-    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.std(offset_corr_data)), \"Standard Deviation: {:0.3f} (ADU)\".format(np.std(data))])\n",
+    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.median(offset_corr_data)), \"Median: {:0.3f} (ADU)\"\n",
+    "            .format(np.median(data))])\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.std(offset_corr_data)), \"Standard Deviation: {:0.3f} (ADU)\"\n",
+    "            .format(np.std(data))])\n",
     "print(t0,'\\n')"
    ]
   },
@@ -510,9 +579,9 @@
    "outputs": [],
    "source": [
     "#*****NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA*******#\n",
-    "hn,cn = np.histogram(noiseMap.flatten(), bins=200, range=(2,40)) # hn: histogram of noise, cn: bin centers for noise\n",
-    "hn_CM,cn_CM = np.histogram(noiseMapCM.flatten(), bins=200, range=(2,40))\n",
-    "\n",
+    "hn, cn = np.histogram(noiseMap.flatten(), bins=200, range=(0, 40)) # hn: histogram of noise, cn: bin centers for noise\n",
+    "hn_CM, cn_CM = np.histogram(noiseMapCM.flatten(), bins=200, range=(0, 40))\n",
+    " \n",
     "dn = [{'x': cn[:-1],\n",
     "     'y': hn,\n",
     "     #'y_err': np.sqrt(hn[:]),\n",
@@ -529,15 +598,15 @@
     "     'label': 'Common Mode Corrected Noise'\n",
     "     }]\n",
     "fig = xana.simplePlot(dn, figsize='2col', aspect=1, x_label = 'Noise (ADU)', y_label=\"Counts\", \n",
-    "                      x_range=(0,40), y_range=(0,1e6), y_log=True, legend='top-center-frame-1col',\n",
+    "                      x_range=(0, 40), y_range=(0, 1e6), y_log=True, legend='top-center-frame-1col',\n",
     "                      title = 'Noise Comparison')\n",
     "\n",
     "#fig.savefig('Noise_CM_1_Hist.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "fig = xana.heatmapPlot(noiseMapCM[:,:,0], aspect=1, x_label='Column Number', y_label='Row Number',\n",
     "                       lut_label='Common Mode Corrected Noise (ADU)', x_range=(0,y), y_range=(0,x), \n",
-    "                       vmax=2*np.mean(noiseMapCM), panel_top_low_lim = 0, panel_top_high_lim = 20, panel_side_low_lim = 0,\n",
-    "                       panel_side_high_lim = 50, title = 'Common Mode Corrected Noise', \n",
+    "                       vmax=2*np.mean(noiseMapCM), panel_top_low_lim = 0, panel_top_high_lim = 20, \n",
+    "                       panel_side_low_lim = 0, panel_side_high_lim = 50, title = 'Common Mode Corrected Noise', \n",
     "                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)')\n",
     "\n",
     "#fig.savefig('NoiseMapCM.pdf', format='pdf', dpi=400, bbox_inches='tight')"
@@ -583,9 +652,9 @@
     "           (noiseMapCM > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
     "\n",
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]),aspect=1, x_label='Column Number', y_label='Row Number', \n",
-    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0,y), y_range=(0,x), \n",
-    "                       title = 'Bad Pixels Map Excluding Non-Sensitive Areas', panel_x_label= 'Columns Stat', \n",
-    "                       panel_y_label='Rows Stat')"
+    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x), \n",
+    "                       title = 'Bad Pixels Map Excluding Non-Sensitive\\n Areas in Middle of CCD', \n",
+    "                       panel_x_label= 'Columns Stat', panel_y_label='Rows Stat')"
    ]
   },
   {
@@ -628,8 +697,8 @@
     "\n",
     "\n",
     "# Defining a circular mask + a rectangular mask (overscan) for the hole in the middle of the CCD:\n",
-    "h, w = (x,y)\n",
-    "hole_mask_bool = create_circular_mask(h-4, w, radius=61.5, center=(w//2,(h-4)//2))\n",
+    "h, w = (x, y)\n",
+    "hole_mask_bool = create_circular_mask(h-4, w, radius=61.5, center=(w//2, (h-4)//2))\n",
     "hole_mask = np.zeros(hole_mask_bool.shape, np.uint32)\n",
     "hole_mask[hole_mask_bool] = BadPixels.NON_SENSITIVE.value\n",
     "\n",
@@ -639,11 +708,12 @@
     "\n",
     "# Assigning this masked area as bad pixels:\n",
     "bad_pixels = np.bitwise_or(bad_pixels, mask)\n",
+    "\n",
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]),aspect=1, x_label='Column Number', y_label='Row Number', \n",
-    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0,y), y_range=(0,x), panel_top_low_lim = 0, \n",
-    "                       panel_top_high_lim = 20, panel_side_low_lim = 0, panel_side_high_lim = 20, \n",
-    "                       title = 'Bad Pixels Map Including Non-Sensitive Areas', panel_x_label='Columns Stat', \n",
-    "                       panel_y_label='Rows Stat', vmax=20)\n",
+    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x), \n",
+    "                       panel_top_low_lim = 0, panel_top_high_lim = 20, panel_side_low_lim = 0, panel_side_high_lim = 20, \n",
+    "                       title = 'Bad Pixels Map Including Non-Sensitive\\n Areas in Middle of CCD', \n",
+    "                       panel_x_label='Columns Stat', panel_y_label='Rows Stat', vmax=20)\n",
     "\n",
     "#fig.savefig('BadPixelMap_1.svg', format='svg', dpi=1200, bbox_inches='tight') "
    ]
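`create_circular_mask` is defined elsewhere in the notebook; a typical implementation is sketched below purely to make the geometry concrete, and should not be read as the notebook's actual code::

    import numpy as np

    def circular_mask(h, w, center, radius):
        """Boolean mask that is True inside a circle of the given radius."""
        yy, xx = np.ogrid[:h, :w]
        return (xx - center[0])**2 + (yy - center[1])**2 <= radius**2

    # Same geometry as above: h-4 = 1930 rows, centered at (w//2, (h-4)//2)
    mask = circular_mask(1930, 960, center=(480, 965), radius=61.5)
    print(mask.sum(), 'pixels inside the non-sensitive hole')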
@@ -678,10 +748,26 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
+    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
+    "chunkSize_new = 0 # See below\n",
+    "\n",
     "for data in reader.readChunks():\n",
-    "    data = data.astype(np.float32)\n",
+    "    #data = data.astype(np.float32)\n",
+    "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
     "    dx = np.count_nonzero(data, axis=(0, 1))\n",
     "    data = data[:,:,dx != 0]\n",
+    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will temporarily \n",
+    "    # change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
+    "    if data.shape[2] < chunkSize:\n",
+    "        chunkSize_new = data.shape[2]\n",
+    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
+    "              .format(chunkSize_new))\n",
+    "        images = images + chunkSize_new\n",
+    "        counter2 += 1 \n",
+    "    else:\n",
+    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
+    "        counter1 += 1     \n",
     "    data_copy = offsetCorrection.correct(copy.copy(data))\n",
     "    cellTable=np.zeros(data_copy.shape[2], np.int32)\n",
     "    data_copy = cmCorrection.correct(data_copy.astype(np.float32), cellTable=cellTable)\n",
@@ -694,11 +780,21 @@
     "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable)\n",
     "    histCalCMCorrected.fill(data)\n",
     "    noiseCal.fill(data) \n",
+    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
     "\n",
+    "print('A total number of {} images are processed.'.format(images))\n",
+    "print(\"Final iteration is Performed.\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "noiseMapCM_2nd = noiseCal.get().filled(0) # the masked pixels are filled with zero\n",
     "ho2, eo2, co2, so2 = histCalCorrected.get()\n",
-    "hCM2, eCM2, cCM2 ,sCM2 = histCalCMCorrected.get()\n",
-    "print(\"Final iteration is Performed.\")"
+    "hCM2, eCM2, cCM2, sCM2 = histCalCMCorrected.get()"
    ]
   },
   {
@@ -751,17 +847,19 @@
     "     }]\n",
     "\n",
     "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'Corrected Signal (ADU)', \n",
-    "                      y_label=\"Counts (Logarithmic Scale)\", y_log=True, x_range=(-40,40), legend='bottom-left-frame-1col',\n",
-    "                      title = 'Comparison of Corrected Signal')\n",
+    "                      y_label=\"Counts (Logarithmic Scale)\", y_log=True, x_range=(-40, 40), \n",
+    "                      legend='bottom-left-frame-1col', title = 'Comparison of Corrected Signal')\n",
     "#fig.savefig('Corrected_Signal_Hist_2.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "# offset_corr_data2 and data most likely have some nan's => I am going to use nanmean, nanmedian and nanstd functions:\n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Comparison of the Second Round of Corrections - Bad Pixels Excluded\"\n",
-    "t0.field_names = [\"After Offset Correction\",\"After Common Mode Correction\"]\n",
+    "t0.field_names = [\"After Offset Correction\", \"After Common Mode Correction\"]\n",
     "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.nanmean(offset_corr_data2)), \"Mean: {:0.3f} (ADU)\".format(np.nanmean(data))])\n",
-    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} (ADU)\".format(np.nanmedian(data))])\n",
-    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(offset_corr_data2)), \"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(data))])\n",
+    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} (ADU)\"\n",
+    "            .format(np.nanmedian(data))])\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(offset_corr_data2)), \n",
+    "            \"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(data))])\n",
     "print(t0,'\\n')"
    ]
   },
@@ -783,7 +881,7 @@
    "outputs": [],
    "source": [
     "#*****NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA*******#\n",
-    "hn_CM2,cn_CM2 = np.histogram(noiseMapCM_2nd.flatten(), bins=200, range=(2,40))\n",
+    "hn_CM2, cn_CM2 = np.histogram(noiseMapCM_2nd.flatten(), bins=200, range=(0, 40))\n",
     "\n",
     "dn2 = [{'x': cn[:-1],\n",
     "     'y': hn,\n",
@@ -809,13 +907,13 @@
     "     }]\n",
     "\n",
     "fig = xana.simplePlot(dn2, figsize='2col', aspect = 1, x_label = 'Noise (ADU)', y_label=\"Counts\", y_log=True, \n",
-    "                      x_range=(0,40), y_range=(0,1e6), legend='top-right-frame-1col', title = 'Final Noise Comparison')\n",
+    "                      x_range=(0, 40), y_range=(0, 1e6), legend='top-right-frame-1col', title = 'Final Noise Comparison')\n",
     "\n",
     "#fig.savefig('Noise_Hist_2.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "fig = xana.heatmapPlot(np.log2(noiseMapCM_2nd[:,:,0]), aspect=1, x_label='Column Number', y_label='Row Number',\n",
-    "                       lut_label='Noise (ADU)', x_range=(0,y), y_range=(0,x), vmax=2*np.mean(noiseMapCM_2nd), \n",
-    "                       title = 'Final Common Mode Corrected Noise (Bad Pixels Excluded)', \n",
+    "                       lut_label='Noise (ADU)', x_range=(0, y), y_range=(0, x), vmax=2*np.mean(noiseMapCM_2nd), \n",
+    "                       title = 'Final Common Mode Corrected Noise\\n (Bad Pixels Excluded)', \n",
     "                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)')\n",
     "#fig.savefig('NoiseMapCM_2nd.pdf', format='pdf', dpi=400, bbox_inches='tight') "
    ]
@@ -848,8 +946,8 @@
     "\n",
     "bad_pixels = np.bitwise_or(bad_pixels, mask)\n",
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]),aspect=1, x_label='Column Number', y_label='Row Number', \n",
-    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0,y), y_range=(0,x), panel_top_low_lim = 0, \n",
-    "                       panel_top_high_lim = 20, panel_side_low_lim = 0, panel_side_high_lim = 20, \n",
+    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x), \n",
+    "                       panel_top_low_lim = 0, panel_top_high_lim = 20, panel_side_low_lim = 0, panel_side_high_lim = 20, \n",
     "                       title = 'Final Bad Pixels Map', panel_x_label='Columns Stat', \n",
     "                       panel_y_label='Rows Stat', vmax=20)\n",
     "#fig.savefig('BadPixelMap_2.svg', format='svg', dpi=1200, bbox_inches='tight') "
@@ -874,7 +972,7 @@
    "source": [
     "### Electronic Noise\n",
     "\n",
-    "According to Table 6.1 (page 80) of Ivana Klačková's master's thesis: \"Conversion gain for the FastCCD is: lower hemisphere = 6.2e-/ADU and upper hemisphere = 6.1e-/ADU.\"\n",
+    "According to Table 6.1 (page 80) of Ivana Klačková's master's thesis: \"Conversion gain for the FastCCD high gain is: lower hemisphere = 6.2e-/ADU and upper hemisphere = 6.1e-/ADU.\" Also, we know that the high gain/medium gain and high gain/low gain ratios are 4 and 8, respectively since high gain = x8, medium gain = x2 and low gain = x1. We do not currently (October - 2019) know the conversion gains for the FastCCD medium and lows gains in electrons. Therefore, we will use those of the high gains (in both hemispheres) together with the gain ratios to convert the noise in ADU to electrons.\n",
     "\n",
     "The following Tables present the noise along lower hemisphere, upper hemisphere, and the entire FastCCD detector at different stages. Here, the values in the first table (in ADU and e-) are the mean of noise per pixel, where noise is considered to be the initial uncorrected noise, CM corrected noise after second trial (including bad pixels) and CM corrected noise after third trial (excluding bad pixels). \n",
     "\n",
@@ -890,27 +988,80 @@
     "# noiseMap refers to the initial uncorrected noise, noiseMapCM refers to common mode corrected noise with inclusion of \n",
     "# bad pixels, and noiseMapCM_2nd refers to common mode corrected noise without inclusion of bad pixels:\n",
     "\n",
-    "ADU_to_electron = (ADU_to_electron_upper + ADU_to_electron_lower)/2 # Average of ADU_to_electron for the entire detector \n",
+    "ADU_to_electron_hg = (ADU_to_electron_upper_hg + ADU_to_electron_lower_hg)/2 # Average of ADU_to_electron for entire CCD\n",
+    "                                                                             # for high gain\n",
+    "\n",
+    "ADU_to_electron_upper_mg = ADU_to_electron_upper_hg*4 # high/medium gain ratio = 4\n",
+    "ADU_to_electron_lower_mg = ADU_to_electron_lower_hg*4\n",
+    "ADU_to_electron_mg = (ADU_to_electron_upper_mg + ADU_to_electron_lower_mg)/2 # Average of ADU_to_electron for entire CCD\n",
+    "                                                                             # for medium gain\n",
+    "    \n",
+    "ADU_to_electron_upper_lg = ADU_to_electron_upper_hg*8 # high/medium gain ratio = 8\n",
+    "ADU_to_electron_lower_lg = ADU_to_electron_lower_hg*8\n",
+    "ADU_to_electron_lg = (ADU_to_electron_upper_lg + ADU_to_electron_lower_lg)/2 # Average of ADU_to_electron for entire CCD\n",
+    "                                                                             # for low gain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for gain, value in gain_dict.items():  \n",
+    "    \n",
+    "    if det_gain == gain_dict[\"low gain\"]:\n",
+    "        ADU_to_electron = ADU_to_electron_lg\n",
+    "        ADU_to_electron_upper = ADU_to_electron_upper_lg\n",
+    "        ADU_to_electron_lower = ADU_to_electron_lower_lg\n",
+    "        \n",
+    "    elif det_gain == gain_dict[\"medium gain\"]:\n",
+    "        ADU_to_electron = ADU_to_electron_mg\n",
+    "        ADU_to_electron_upper = ADU_to_electron_upper_mg\n",
+    "        ADU_to_electron_lower = ADU_to_electron_lower_mg\n",
+    "        \n",
+    "    else: # Here, we assume the auto gain and high gain conversions from ADU to electrons are the same.\n",
+    "        ADU_to_electron = ADU_to_electron_hg\n",
+    "        ADU_to_electron_upper = ADU_to_electron_upper_hg\n",
+    "        ADU_to_electron_lower = ADU_to_electron_lower_hg\n",
     "\n",
     "print(\"Abbreviations:\")\n",
-    "print(\" - ED = Entire Detector; LH: Lower Hemisphere; UH: Upper Hemisphere\")\n",
-    "print(\" - CM Noise: Common Mode Corrected Noise\")\n",
+    "print(\" - ED = Entire Detector;\\n - LH: Lower Hemisphere;\\n - UH: Upper Hemisphere;\")\n",
+    "print(\" - CM Noise: Common Mode Corrected Noise;\")\n",
     "print(\" - BP: Bad Pixels\\n\")\n",
     "      \n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Averages of Noise per Pixel\"\n",
-    "t0.field_names = [\"Uncorrected Noise\",\"CM Noise, BP Incl.\", \"CM Noise, BP Excl.\"]\n",
-    "t0.add_row([\"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap),np.mean(noiseMap)*ADU_to_electron), \"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM), np.mean(noiseMapCM)*ADU_to_electron), \"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd), np.mean(noiseMapCM_2nd)*ADU_to_electron)])\n",
-    "t0.add_row([\"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap[:x//2,:]), np.mean(noiseMap[:x//2,:])*ADU_to_electron_lower), \"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM[:x//2,:]), np.mean(noiseMapCM[:x//2,:])*ADU_to_electron_lower), \"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd[:x//2,:]), np.mean(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])\n",
-    "t0.add_row([\"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap[x//2:,:]), np.mean(noiseMap[x//2:,:])*ADU_to_electron_upper), \"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM[x//2:,:]), np.mean(noiseMapCM[x//2:,:])*ADU_to_electron_upper), \"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd[x//2:,:]), np.mean(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])\n",
+    "t0.field_names = [\"Uncorrected Noise\", \"CM Noise, BP Incl.\", \"CM Noise, BP Excl.\"]\n",
+    "t0.add_row([\"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap), np.mean(noiseMap)*ADU_to_electron), \n",
+    "            \"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM), np.mean(noiseMapCM)*ADU_to_electron), \n",
+    "            \"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd), np.mean(noiseMapCM_2nd)*ADU_to_electron)])\n",
+    "t0.add_row([\"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap[:x//2,:]), \n",
+    "                                                  np.mean(noiseMap[:x//2,:])*ADU_to_electron_lower), \n",
+    "            \"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM[:x//2,:]), \n",
+    "                                                  np.mean(noiseMapCM[:x//2,:])*ADU_to_electron_lower), \n",
+    "            \"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd[:x//2,:]), \n",
+    "                                                  np.mean(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])\n",
+    "t0.add_row([\"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap[x//2:,:]), \n",
+    "                                                  np.mean(noiseMap[x//2:,:])*ADU_to_electron_upper), \n",
+    "            \"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM[x//2:,:]), \n",
+    "                                                  np.mean(noiseMapCM[x//2:,:])*ADU_to_electron_upper), \n",
+    "            \"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd[x//2:,:]), \n",
+    "                                                  np.mean(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])\n",
     "print(t0,'\\n')\n",
     "\n",
     "t1 = PrettyTable()\n",
     "t1.title = \"Standard Deviations of Noise per Pixel\"\n",
-    "t1.field_names = [\"Uncorrected Noise\",\"CM Noise, BP Incl.\", \"CM Noise, BP Excl.\"]\n",
-    "t1.add_row([\"ED: {:0.2f} e-\".format(np.std(noiseMap)*ADU_to_electron), \"ED: {:0.2f} e-\".format(np.std(noiseMapCM)*ADU_to_electron), \"ED: {:0.2f} e-\".format(np.std(noiseMapCM_2nd)*ADU_to_electron)])\n",
-    "t1.add_row([\"LH: {:0.2f} e-\".format(np.std(noiseMap[:x//2,:])*ADU_to_electron_lower), \"LH: {:0.2f} e-\".format(np.std(noiseMapCM[:x//2,:])*ADU_to_electron_lower), \"LH: {:0.2f} e-\".format(np.std(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])\n",
-    "t1.add_row([\"UH: {:0.2f} e-\".format(np.std(noiseMap[x//2:,:])*ADU_to_electron_upper), \"UH: {:0.2f} e-\".format(np.std(noiseMapCM[x//2:,:])*ADU_to_electron_upper), \"UH: {:0.2f} e-\".format(np.std(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])\n",
+    "t1.field_names = [\"Uncorrected Noise\", \"CM Noise, BP Incl.\", \"CM Noise, BP Excl.\"]\n",
+    "t1.add_row([\"ED: {:0.2f} e-\".format(np.std(noiseMap)*ADU_to_electron), \n",
+    "            \"ED: {:0.2f} e-\".format(np.std(noiseMapCM)*ADU_to_electron), \n",
+    "            \"ED: {:0.2f} e-\".format(np.std(noiseMapCM_2nd)*ADU_to_electron)])\n",
+    "t1.add_row([\"LH: {:0.2f} e-\".format(np.std(noiseMap[:x//2,:])*ADU_to_electron_lower), \n",
+    "            \"LH: {:0.2f} e-\".format(np.std(noiseMapCM[:x//2,:])*ADU_to_electron_lower), \n",
+    "            \"LH: {:0.2f} e-\".format(np.std(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])\n",
+    "t1.add_row([\"UH: {:0.2f} e-\".format(np.std(noiseMap[x//2:,:])*ADU_to_electron_upper), \n",
+    "            \"UH: {:0.2f} e-\".format(np.std(noiseMapCM[x//2:,:])*ADU_to_electron_upper), \n",
+    "            \"UH: {:0.2f} e-\".format(np.std(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])\n",
     "print(t1)"
    ]
   },
@@ -961,10 +1112,11 @@
     "    \n",
     "    if db_output:\n",
     "        metadata.calibration_constant_version.raw_data_location = file_loc\n",
-    "        metadata.send(cal_db_interface, timeout=cal_db_timeout)    \n",
-    "\n",
+    "        metadata.send(cal_db_interface, timeout=cal_db_timeout)  \n",
+    "        \n",
     "print(\"Calibration constants (offsetMap, noiseMapCM_2nd and bad_pixels) are sent to the calibration database.\")\n",
-    "print(\"Creation time is: {}\".format(creation_time))"
+    "print(\"Creation time is: {}\".format(creation_time))\n",
+    "print(\"Raw data location is: {}\".format(file_loc))"
    ]
   },
   {
diff --git a/notebooks/FastCCD/PlotFromCalDB_FastCCD_NBC.ipynb b/notebooks/FastCCD/PlotFromCalDB_FastCCD_NBC.ipynb
deleted file mode 100644
index 908d75fea..000000000
--- a/notebooks/FastCCD/PlotFromCalDB_FastCCD_NBC.ipynb
+++ /dev/null
@@ -1,505 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Statistical analysis of calibration factors#\n",
-    "\n",
-    "Author: Mikhail Karnevskiy, Steffen Hauf, Version 0.1\n",
-    "\n",
-    "A description of the notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "cluster_profile = \"noDB\" # The ipcluster profile to use\n",
-    "start_date = \"2019-01-30\" # date to start investigation interval from\n",
-    "end_date = \"2019-08-30\" # date to end investigation interval at, can be \"now\"\n",
-    "nconstants = 10 # Number of time stamps to plot. If not 0, overcome start_date.\n",
-    "dclass=\"CCD\" # Detector class\n",
-    "db_module = \"fastCCD1\" # detector entry in the DB to investigate\n",
-    "constants = [\"Noise\", \"Offset\"] # constants to plot\n",
-    "\n",
-    "gain_setting = [0,1,2,8] # gain stages\n",
-    "bias_voltage = [79] # Bias voltage\n",
-    "temperature = [235, 216, 245] # Operation temperature\n",
-    "integration_time = [1, 50] # Integration time\n",
-    "pixels_x=[1934] # number of pixels along X axis\n",
-    "pixels_y=[960] # number of pixels along Y axis\n",
-    "max_time = 15 # max time margin in minutes to match bad pixels\n",
-    "parameter_names = ['bias_voltage', 'integration_time', 'temperature', \n",
-    "                   'gain_setting', 'pixels_x', 'pixels_y'] # names of parameters\n",
-    "\n",
-    "separate_plot = ['integration_time', 'gain_setting', 'temperature'] # Plot on separate plots\n",
-    "photon_energy = 9.2 # Photon energy of the beam\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/karnem/test_FCCD/\" # output folder\n",
-    "use_existing = \"\" # If not empty, constants stored in given folder will be used\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # the database interface to use\n",
-    "cal_db_timeout = 180000 # timeout on caldb requests\",\n",
-    "plot_range = 3 # range for plotting in units of median absolute deviations"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "import copy\n",
-    "import datetime\n",
-    "import dateutil.parser\n",
-    "import numpy as np\n",
-    "from operator import itemgetter\n",
-    "import os\n",
-    "import sys\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "from iCalibrationDB import Constants, Conditions, Detectors, ConstantMetaData\n",
-    "from cal_tools.tools import get_from_db\n",
-    "from cal_tools.ana_tools import (save_dict_to_hdf5, load_data_from_hdf5, \n",
-    "                                 HMType, hm_combine,\n",
-    "                                 combine_lists, get_range)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Prepare variables\n",
-    "spShape = (967, 10) # Shape of superpixel\n",
-    "\n",
-    "parameters = [globals()[x] for x in parameter_names]\n",
-    "\n",
-    "constantsDark = {'Noise': 'BadPixelsDark',\n",
-    "                 'Offset': 'BadPixelsDark'}\n",
-    "print('Bad pixels data: ', constantsDark)\n",
-    "\n",
-    "# Define parameters in order to perform loop over time stamps\n",
-    "start = datetime.datetime.now() if start_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    start_date)\n",
-    "end = datetime.datetime.now() if end_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    end_date)\n",
-    "\n",
-    "# Create output folder\n",
-    "os.makedirs(out_folder, exist_ok=True)\n",
-    "\n",
-    "# Get getector conditions\n",
-    "det = getattr(Detectors, db_module)\n",
-    "dconstants = getattr(Constants, dclass)(det.detector_type)\n",
-    "\n",
-    "print('CalDB Interface: {}'.format(cal_db_interface))\n",
-    "print('Start time at: ', start)\n",
-    "print('End time at: ', end)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "parameter_list = combine_lists(*parameters, names = parameter_names)\n",
-    "print(parameter_list)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "# Retrieve list of meta-data\n",
-    "constant_versions = []\n",
-    "constant_parameters = []\n",
-    "constantBP_versions = []\n",
-    "\n",
-    "# Loop over constants\n",
-    "for c, const in enumerate(constants):\n",
-    "    \n",
-    "    if use_existing != \"\":\n",
-    "        break\n",
-    "    \n",
-    "    # Loop over parameters\n",
-    "    for pars in parameter_list:\n",
-    "    \n",
-    "        if (const in [\"Offset\", \"Noise\", \"SlopesPC\"] or \"DARK\" in const.upper()):\n",
-    "            dcond = Conditions.Dark\n",
-    "            mcond = getattr(dcond, dclass)(**pars)\n",
-    "        else:\n",
-    "            dcond = Conditions.Illuminated\n",
-    "            mcond = getattr(dcond, dclass)(**pars,\n",
-    "                                photon_energy=photon_energy)\n",
-    "\n",
-    "            \n",
-    "            \n",
-    "        print('Request: ', const, 'with paramters:', pars)\n",
-    "        # Request Constant versions for given parameters and module\n",
-    "        data = get_from_db(det,\n",
-    "                           getattr(dconstants,\n",
-    "                                   const)(),\n",
-    "                           copy.deepcopy(mcond), None,\n",
-    "                           cal_db_interface,\n",
-    "                           creation_time=start,\n",
-    "                           verbosity=0,\n",
-    "                           timeout=cal_db_timeout,\n",
-    "                           meta_only=True,\n",
-    "                           version_info=True)\n",
-    "        \n",
-    "        if not isinstance(data, list):\n",
-    "                continue\n",
-    "            \n",
-    "        data = sorted(data, key=itemgetter('begin_at'), reverse=True)\n",
-    "        print('Number of retrieved constants: {}'.format(len(data)) )\n",
-    "                \n",
-    "        if const in constantsDark:\n",
-    "            # Request BP constant versions\n",
-    "            dataBP = get_from_db(det,\n",
-    "                                 getattr(dconstants, \n",
-    "                                         constantsDark[const])(),\n",
-    "                                 copy.deepcopy(mcond), None,\n",
-    "                                 cal_db_interface,\n",
-    "                                 creation_time=start,\n",
-    "                                 verbosity=0,\n",
-    "                                 timeout=cal_db_timeout,\n",
-    "                                 meta_only=True,\n",
-    "                                 version_info=True)\n",
-    "        \n",
-    "            if not isinstance(data, list) or not isinstance(dataBP, list):\n",
-    "                continue\n",
-    "            print('Number of retrieved darks: {}'.format(len(dataBP)) )\n",
-    "            found_BPmatch = False\n",
-    "            for d in data:\n",
-    "                # Match proper BP constant version\n",
-    "                # and get constant version within\n",
-    "                # requested time range\n",
-    "                if d is None:\n",
-    "                    print('Time or data is not found!')\n",
-    "                    continue\n",
-    "\n",
-    "                dt = dateutil.parser.parse(d['begin_at'])\n",
-    "\n",
-    "                if (dt.replace(tzinfo=None) > end or \n",
-    "                    (nconstants==0 and dt.replace(tzinfo=None) < start)):\n",
-    "                    continue\n",
-    "                    \n",
-    "                if nconstants>0 and constant_parameters.count(pars)>nconstants-1:\n",
-    "                    break\n",
-    "\n",
-    "                closest_BP = None\n",
-    "                closest_BPtime = None\n",
-    "\n",
-    "                for dBP in dataBP:\n",
-    "                    if dBP is None:\n",
-    "                        print(\"Bad pixels are not found!\")\n",
-    "                        continue\n",
-    "\n",
-    "                    dt = dateutil.parser.parse(d['begin_at'])\n",
-    "                    dBPt = dateutil.parser.parse(dBP['begin_at'])\n",
-    "\n",
-    "                    if dt == dBPt:\n",
-    "                        found_BPmatch = True\n",
-    "                    else:\n",
-    "\n",
-    "                        if np.abs(dBPt-dt).seconds < (max_time*60):\n",
-    "                            if closest_BP is None:\n",
-    "                                closest_BP = dBP\n",
-    "                                closest_BPtime = dBPt\n",
-    "                            else:\n",
-    "                                if np.abs(dBPt-dt) < np.abs(closest_BPtime-dt):\n",
-    "                                    closest_BP = dBP\n",
-    "                                    closest_BPtime = dBPt\n",
-    "\n",
-    "                        if dataBP.index(dBP) ==  len(dataBP)-1:\n",
-    "                            if closest_BP:\n",
-    "                                dBP = closest_BP\n",
-    "                                dBPt = closest_BPtime\n",
-    "                                found_BPmatch = True\n",
-    "                            else:\n",
-    "                                print('Bad pixels are not found!')\n",
-    "\n",
-    "                    if found_BPmatch:\n",
-    "                        print(\"Found constant {}: begin at {}\".format(const, dt))\n",
-    "                        print(\"Found bad pixels at {}\".format(dBPt))\n",
-    "                        constantBP_versions.append(dBP)\n",
-    "                        constant_versions.append(d)\n",
-    "                        constant_parameters.append(copy.deepcopy(pars))\n",
-    "                        found_BPmatch = False\n",
-    "                        break\n",
-    "        else:\n",
-    "            constant_versions += data\n",
-    "            constant_parameters += [copy.deepcopy(pars)]*len(data)\n",
-    "\n",
-    "# Remove dublications\n",
-    "constant_versions_tmp = []\n",
-    "constant_parameters_tmp = []\n",
-    "constantBP_versions_tmp = []\n",
-    "for i, x in enumerate(constant_versions):\n",
-    "    if x not in constant_versions_tmp:\n",
-    "        constant_versions_tmp.append(x)\n",
-    "        constant_parameters_tmp.append(constant_parameters[i])\n",
-    "        if i<len(constantBP_versions)-1:\n",
-    "            constantBP_versions_tmp.append(constantBP_versions[i])\n",
-    "constant_versions=constant_versions_tmp\n",
-    "constantBP_versions=constantBP_versions_tmp\n",
-    "constant_parameters=constant_parameters_tmp\n",
-    "\n",
-    "print('Number of stored constant versions is {}'.format(len(constant_versions)))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def get_rebined(a, rebin):\n",
-    "    return a[:,:,0].reshape(\n",
-    "                int(a.shape[0] / rebin[0]),\n",
-    "                rebin[0],\n",
-    "                int(a.shape[1] / rebin[1]),\n",
-    "                rebin[1])\n",
-    "    \n",
-    "def modify_const(const, data, isBP = False):\n",
-    "    return data\n",
-    "\n",
-    "ret_constants = {}\n",
-    "constant_data = ConstantMetaData()\n",
-    "constant_BP = ConstantMetaData()\n",
-    "for i, constant_version in enumerate(constant_versions):\n",
-    "\n",
-    "    const = constant_version['data_set_name'].split('/')[-2]\n",
-    "    qm = db_module\n",
-    "    \n",
-    "    print(\"constant: {}, module {}\".format(const,qm))\n",
-    "    constant_data.retrieve_from_version_info(constant_version)\n",
-    "    \n",
-    "    for key in separate_plot:\n",
-    "        const = '{}_{}'.format(const, constant_parameters[i][key])\n",
-    "    \n",
-    "    if not const in ret_constants:\n",
-    "        ret_constants[const] = {}\n",
-    "    if not qm in ret_constants[const]:\n",
-    "            ret_constants[const][qm] = []\n",
-    "    \n",
-    "    cdata = constant_data.calibration_constant.data\n",
-    "    ctime = constant_data.calibration_constant_version.begin_at\n",
-    "    \n",
-    "    cdata = modify_const(const, cdata)\n",
-    "    \n",
-    "    if len(constantBP_versions)>0:\n",
-    "        constant_BP.retrieve_from_version_info(constantBP_versions[i])\n",
-    "        cdataBP = constant_BP.calibration_constant.data\n",
-    "        cdataBP = modify_const(const, cdataBP, True)\n",
-    "        \n",
-    "        if cdataBP.shape != cdata.shape:\n",
-    "            print('Wrong bad pixel shape! {}, expected {}'.format(cdataBP.shape, cdata.shape))\n",
-    "            continue\n",
-    "        \n",
-    "        # Apply bad pixel mask\n",
-    "        cdataABP = np.copy(cdata)\n",
-    "        cdataABP[cdataBP > 0] = np.nan\n",
-    "    \n",
-    "        # Create superpixels for constants with BP applied\n",
-    "        cdataABP = get_rebined(cdataABP, spShape)\n",
-    "        toStoreBP = np.nanmean(cdataABP, axis=(1, 3))\n",
-    "        toStoreBPStd = np.nanstd(cdataABP, axis=(1, 3))\n",
-    "\n",
-    "        # Prepare number of bad pixels per superpixels\n",
-    "        cdataBP = get_rebined(cdataBP, spShape)\n",
-    "        cdataNBP = np.nansum(cdataBP > 0, axis=(1, 3))\n",
-    "    else:\n",
-    "        toStoreBP = 0\n",
-    "        toStoreBPStd = 0\n",
-    "        cdataNBP = 0\n",
-    "\n",
-    "    # Create superpixels for constants without BP applied\n",
-    "    cdata = get_rebined(cdata, spShape)\n",
-    "    toStoreStd = np.nanstd(cdata, axis=(1, 3))\n",
-    "    toStore = np.nanmean(cdata, axis=(1, 3))\n",
-    "    \n",
-    "    # Convert parameters to dict\n",
-    "    dpar = {p.name: p.value for p in constant_data.detector_condition.parameters}\n",
-    "    \n",
-    "    print(\"Store values in dict\", const, qm, ctime)\n",
-    "    ret_constants[const][qm].append({'ctime': ctime,\n",
-    "                                     'nBP': cdataNBP,\n",
-    "                                     'dataBP': toStoreBP,\n",
-    "                                     'dataBPStd': toStoreBPStd,\n",
-    "                                     'data': toStore,\n",
-    "                                     'dataStd': toStoreStd,\n",
-    "                                     'mdata': dpar})  \n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    print('Save data to {}/CalDBAna_{}_{}.h5'.format(out_folder, dclass, db_module))\n",
-    "    save_dict_to_hdf5(ret_constants,\n",
-    "                      '{}/CalDBAna_{}_{}.h5'.format(out_folder, dclass, db_module))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(out_folder, dclass)\n",
-    "else:\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(use_existing, dclass)\n",
-    "\n",
-    "print('Load data from {}'.format(fpath))\n",
-    "ret_constants = load_data_from_hdf5(fpath)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Parameters for plotting\n",
-    "\n",
-    "keys = {\n",
-    "    'Mean': ['data', '', 'Mean over pixels'],\n",
-    "    'std': ['dataStd', '', '$\\sigma$ over pixels'],\n",
-    "    'MeanBP': ['dataBP', 'Good pixels only', 'Mean over pixels'],\n",
-    "    'NBP': ['nBP', 'Fraction of BP', 'Fraction of BP'],\n",
-    "    'stdBP': ['dataBPStd', 'Good pixels only', '$\\sigma$ over pixels'],\n",
-    "    'stdASIC': ['', '', '$\\sigma$ over ASICs'],\n",
-    "    'stdCell': ['', '', '$\\sigma$ over Cells'],\n",
-    "}\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print('Plot calibration constants')\n",
-    "\n",
-    "# loop over constat type\n",
-    "for const, modules in ret_constants.items():\n",
-    "\n",
-    "        const = const.split(\"_\")\n",
-    "        print('Const: {}'.format(const))\n",
-    "\n",
-    "        # Loop over modules\n",
-    "        for mod, data in modules.items():\n",
-    "            print(mod)\n",
-    "\n",
-    "            ctimes = np.array(data[\"ctime\"])\n",
-    "            ctimes_ticks = [x.strftime('%y-%m-%d') for x in ctimes]\n",
-    "\n",
-    "            if (\"mdata\" in data):\n",
-    "                cmdata = np.array(data[\"mdata\"])\n",
-    "                for i, tick in enumerate(ctimes_ticks):\n",
-    "                    ctimes_ticks[i] = ctimes_ticks[i] + \\\n",
-    "                        ', V={:1.0f}'.format(cmdata[i]['Sensor Temperature']) + \\\n",
-    "                        ', T={:1.0f}'.format(\n",
-    "                        cmdata[i]['Integration Time'])\n",
-    "\n",
-    "            sort_ind = np.argsort(ctimes_ticks)\n",
-    "            ctimes_ticks = list(np.array(ctimes_ticks)[sort_ind])\n",
-    "\n",
-    "            # Create sorted by data dataset\n",
-    "            rdata = {}\n",
-    "            for key, item in keys.items():\n",
-    "                if item[0] in data:\n",
-    "                    rdata[key] = np.array(data[item[0]])[sort_ind]\n",
-    "\n",
-    "            nTimes = rdata['Mean'].shape[0]\n",
-    "            nPixels = rdata['Mean'].shape[1] * rdata['Mean'].shape[2]\n",
-    "            nBins = nPixels\n",
-    "\n",
-    "            # Avoid too low values\n",
-    "            if const[0] in [\"Noise\", \"Offset\"]:\n",
-    "                rdata['Mean'][rdata['Mean'] < 0.1] = np.nan\n",
-    "                if 'MeanBP' in rdata:\n",
-    "                    rdata['MeanBP'][rdata['MeanBP'] < 0.1] = np.nan\n",
-    "                    \n",
-    "            if 'NBP' in rdata:\n",
-    "                rdata['NBP'] = rdata['NBP'].astype(float)\n",
-    "                rdata[\"NBP\"][rdata[\"NBP\"] == (spShape[0] * spShape[1])] = np.nan\n",
-    "                rdata[\"NBP\"] = rdata[\"NBP\"] / spShape[0] / spShape[1] * 100\n",
-    "\n",
-    "            # Reshape: ASICs over cells for plotting\n",
-    "            pdata = {}\n",
-    "            for key in rdata:\n",
-    "                if len(rdata[key].shape)<3:\n",
-    "                    continue\n",
-    "                pdata[key] = rdata[key][:, :, :].reshape(nTimes, nBins).swapaxes(0, 1)\n",
-    "\n",
-    "            # Plotting\n",
-    "            for key in pdata:\n",
-    "                if len(pdata[key].shape)<2:\n",
-    "                    continue\n",
-    "\n",
-    "                if key == 'NBP':\n",
-    "                    unit = '[%]'\n",
-    "                else:\n",
-    "                    unit = '[ADU]'\n",
-    "\n",
-    "                title = '{}, module {}, {}'.format(\n",
-    "                    const[0], mod, keys[key][1])\n",
-    "                cb_label = '{}, {} {}'.format(const[0], keys[key][2], unit)\n",
-    "\n",
-    "                fname = '{}/{}_{}'.format(out_folder, const[0], mod.replace('_', ''))\n",
-    "                for item in const[1:]:\n",
-    "                    fname = '{}_{}'.format(fname, item)\n",
-    "                fname = '{}_ASIC_{}.png'.format(fname, key)\n",
-    "                \n",
-    "                vmin,vmax = get_range(pdata[key][::-1].flatten(), plot_range)\n",
-    "                hm_combine(pdata[key][::-1], htype=HMType.mro,\n",
-    "                          x_label='Creation Time', y_label='ASIC ID',\n",
-    "                          x_ticklabels=ctimes_ticks,\n",
-    "                          x_ticks=np.arange(len(ctimes_ticks))+0.3,\n",
-    "                          title=title, cb_label=cb_label,\n",
-    "                          vmin=vmin, vmax=vmax,\n",
-    "                          fname=fname,\n",
-    "                          pad=[0.125, 0.125, 0.12, 0.185])\n"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/Jungfrau/PlotFromCalDB_Jungfrau_NBC.ipynb b/notebooks/Jungfrau/PlotFromCalDB_Jungfrau_NBC.ipynb
deleted file mode 100644
index 1b428701c..000000000
--- a/notebooks/Jungfrau/PlotFromCalDB_Jungfrau_NBC.ipynb
+++ /dev/null
@@ -1,574 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Statistical analysis of calibration factors#\n",
-    "\n",
-    "Author: Mikhail Karnevskiy, Steffen Hauf, Version 0.1\n",
-    "\n",
-    "Calibration constants for JungFrau detector from the data base with injection time between start_date and end_date are considered.\n",
-    "\n",
-    "To be visualized, calibration constants are averaged per group of pixels. Plots shows calibration constant over time for each constant.\n",
-    "\n",
-    "Values shown in plots are saved in h5 files."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "cluster_profile = \"noDB\" # The ipcluster profile to use\n",
-    "start_date = \"2019-06-30\" # date to start investigation interval from\n",
-    "end_date = \"2019-09-01\" # date to end investigation interval at, can be \"now\"\n",
-    "dclass=\"jungfrau\" # Detector class\n",
-    "modules = [\"Jungfrau_M125\", \"Jungfrau_M260\"] # detector entry in the DB to investigate\n",
-    "constants = [\"Noise\", \"Offset\"] # constants to plot\n",
-    "nconstants = 10 # Number of time stamps to plot. If not 0, overcome start_date.\n",
-    "bias_voltage = [90, 180] # bias voltage\n",
-    "memory_cells = [1] # number of memory cells\n",
-    "pixels_x = [1024] # number of pixels along X axis\n",
-    "pixels_y = [512, 1024] # number of pixels along Y axis\n",
-    "temperature = [291] # operational temperature\n",
-    "integration_time = [50, 250] # integration time\n",
-    "gain_setting = [0] # gain stage\n",
-    "\n",
-    "parameter_names = ['bias_voltage', 'integration_time', 'pixels_x', 'pixels_y', 'gain_setting',\n",
-    "                   'temperature', 'memory_cells'] # names of parameters\n",
-    "\n",
-    "separate_plot = ['integration_time'] # Plot on separate plots\n",
-    "max_time = 15 # max time margin in minutes to match bad pixels\n",
-    "photon_energy = 9.2 # Photon energy of the beam\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/karnem/test_JF/\" # output folder\n",
-    "use_existing = \"\" # If not empty, constants stored in given folder will be used\n",
-    "cal_db_interface = \"tcp://max-exfl016:8016\" # the database interface to use\n",
-    "cal_db_timeout = 180000 # timeout on caldb requests\",\n",
-    "plot_range = 3 # range for plotting in units of median absolute deviations\n",
-    "spShape = [256, 64] # Shape of superpixel"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "import copy\n",
-    "import datetime\n",
-    "import dateutil.parser\n",
-    "import numpy as np\n",
-    "import os\n",
-    "import sys\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "import matplotlib.pyplot as plt\n",
-    "%matplotlib inline\n",
-    "\n",
-    "from iCalibrationDB import Constants, Conditions, Detectors, ConstantMetaData\n",
-    "from cal_tools.tools import get_from_db, get_random_db_interface\n",
-    "from cal_tools.ana_tools import (save_dict_to_hdf5, load_data_from_hdf5, \n",
-    "                                 HMType, hm_combine,\n",
-    "                                 combine_lists, get_range)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Prepare variables\n",
-    "parameters = [globals()[x] for x in parameter_names]\n",
-    "\n",
-    "constantsDark = {'Noise': 'BadPixelsDark',\n",
-    "                 'Offset': 'BadPixelsDark'}\n",
-    "print('Bad pixels data: ', constantsDark)\n",
-    "\n",
-    "# Define parameters in order to perform loop over time stamps\n",
-    "start = datetime.datetime.now() if start_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    start_date)\n",
-    "end = datetime.datetime.now() if end_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    end_date)\n",
-    "\n",
-    "# Create output folder\n",
-    "os.makedirs(out_folder, exist_ok=True)\n",
-    "\n",
-    "# Get getector conditions\n",
-    "dconstants = getattr(Constants, dclass)\n",
-    "\n",
-    "print('CalDB Interface: {}'.format(cal_db_interface))\n",
-    "print('Start time at: ', start)\n",
-    "print('End time at: ', end)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "parameter_list = combine_lists(*parameters, names = parameter_names)\n",
-    "print(parameter_list)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "# Retrieve list of meta-data\n",
-    "constant_versions = []\n",
-    "constant_parameters = []\n",
-    "constantBP_versions = []\n",
-    "\n",
-    "# Loop over constants\n",
-    "for c, const in enumerate(constants):\n",
-    "    \n",
-    "    for db_module in modules:\n",
-    "        det = getattr(Detectors, db_module)\n",
-    "    \n",
-    "        if use_existing != \"\":\n",
-    "            break\n",
-    "\n",
-    "        # Loop over parameters\n",
-    "        for pars in parameter_list:\n",
-    "\n",
-    "            if (const in [\"Offset\", \"Noise\", \"SlopesPC\"] or \"DARK\" in const.upper()):\n",
-    "                dcond = Conditions.Dark\n",
-    "                mcond = getattr(dcond, dclass)(**pars)\n",
-    "            else:\n",
-    "                dcond = Conditions.Illuminated\n",
-    "                mcond = getattr(dcond, dclass)(**pars,\n",
-    "                                    photon_energy=photon_energy)\n",
-    "\n",
-    "\n",
-    "\n",
-    "            print('Request: ', const, 'with paramters:', pars)\n",
-    "            # Request Constant versions for given parameters and module\n",
-    "            data = get_from_db(det,\n",
-    "                               getattr(dconstants,\n",
-    "                                       const)(),\n",
-    "                               copy.deepcopy(mcond), None,\n",
-    "                               cal_db_interface,\n",
-    "                               creation_time=start,\n",
-    "                               verbosity=0,\n",
-    "                               timeout=cal_db_timeout,\n",
-    "                               meta_only=True,\n",
-    "                               version_info=True)\n",
-    "\n",
-    "            if not isinstance(data, list):\n",
-    "                    continue\n",
-    "\n",
-    "            if const in constantsDark:\n",
-    "                # Request BP constant versions\n",
-    "                print('constantDark:', constantsDark[const], )        \n",
-    "                dataBP = get_from_db(det,\n",
-    "                                     getattr(dconstants, \n",
-    "                                             constantsDark[const])(),\n",
-    "                                     copy.deepcopy(mcond), None,\n",
-    "                                     cal_db_interface,\n",
-    "                                     creation_time=start,\n",
-    "                                     verbosity=0,\n",
-    "                                     timeout=cal_db_timeout,\n",
-    "                                     meta_only=True,\n",
-    "                                     version_info=True)\n",
-    "\n",
-    "                if not isinstance(data, list) or not isinstance(dataBP, list):\n",
-    "                    continue\n",
-    "\n",
-    "                found_BPmatch = False\n",
-    "                for d in data:\n",
-    "                    # Match proper BP constant version\n",
-    "                    # and get constant version within\n",
-    "                    # requested time range\n",
-    "                    if d is None:\n",
-    "                        print('Time or data is not found!')\n",
-    "                        continue\n",
-    "\n",
-    "                    dt = dateutil.parser.parse(d['begin_at'])\n",
-    "\n",
-    "                    if dt.replace(tzinfo=None) > end or dt.replace(tzinfo=None) < start:\n",
-    "                        continue\n",
-    "\n",
-    "                    closest_BP = None\n",
-    "                    closest_BPtime = None\n",
-    "\n",
-    "                    for dBP in dataBP:\n",
-    "                        if dBP is None:\n",
-    "                            print(\"Bad pixels are not found!\")\n",
-    "                            continue\n",
-    "\n",
-    "                        dt = dateutil.parser.parse(d['begin_at'])\n",
-    "                        dBPt = dateutil.parser.parse(dBP['begin_at'])\n",
-    "\n",
-    "                        if dt == dBPt:\n",
-    "                            found_BPmatch = True\n",
-    "                        else:\n",
-    "\n",
-    "                            if np.abs(dBPt-dt).seconds < (max_time*60):\n",
-    "                                if closest_BP is None:\n",
-    "                                    closest_BP = dBP\n",
-    "                                    closest_BPtime = dBPt\n",
-    "                                else:\n",
-    "                                    if np.abs(dBPt-dt) < np.abs(closest_BPtime-dt):\n",
-    "                                        closest_BP = dBP\n",
-    "                                        closest_BPtime = dBPt\n",
-    "\n",
-    "                            if dataBP.index(dBP) ==  len(dataBP)-1:\n",
-    "                                if closest_BP:\n",
-    "                                    dBP = closest_BP\n",
-    "                                    dBPt = closest_BPtime\n",
-    "                                    found_BPmatch = True\n",
-    "                                else:\n",
-    "                                    print('Bad pixels are not found!')\n",
-    "\n",
-    "                        if found_BPmatch:\n",
-    "                            print(\"Found constant {}: begin at {}\".format(const, dt))\n",
-    "                            print(\"Found bad pixels at {}\".format(dBPt))\n",
-    "                            constantBP_versions.append(dBP)\n",
-    "                            constant_versions.append(d)\n",
-    "                            constant_parameters.append(copy.deepcopy(pars))\n",
-    "                            found_BPmatch = False\n",
-    "                            break\n",
-    "            else:\n",
-    "                constant_versions += data\n",
-    "                constant_parameters += [copy.deepcopy(pars)]*len(data)\n",
-    "\n",
-    "# Remove dublications\n",
-    "constant_versions_tmp = []\n",
-    "constant_parameters_tmp = []\n",
-    "for i, x in enumerate(constant_versions):\n",
-    "    if x not in constant_versions_tmp:\n",
-    "        constant_versions_tmp.append(x)\n",
-    "        constant_parameters_tmp.append(constant_parameters[i])\n",
-    "        \n",
-    "constant_versions=constant_versions_tmp\n",
-    "constant_parameters=constant_parameters_tmp\n",
-    "\n",
-    "print('Number of stored constant versions is {}'.format(len(constant_versions)))\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def get_rebined(a, rebin):\n",
-    "    return a.reshape(\n",
-    "                int(a.shape[0] / rebin[0]),\n",
-    "                rebin[0],\n",
-    "                int(a.shape[1] / rebin[1]),\n",
-    "                rebin[1],\n",
-    "                a.shape[2],\n",
-    "                a.shape[3])\n",
-    "\n",
-    "def modify_const(const, data, isBP = False):\n",
-    "    return data\n",
-    "\n",
-    "ret_constants = {}\n",
-    "constant_data = ConstantMetaData()\n",
-    "constant_BP = ConstantMetaData()\n",
-    "\n",
-    "# sort over begin_at\n",
-    "idxs, _ = zip(*sorted(enumerate(constant_versions), \n",
-    "                     key=lambda x: x[1]['begin_at'], reverse=True))\n",
-    "\n",
-    "for i in idxs:\n",
-    "    const = constant_versions[i]['data_set_name'].split('/')[-2]\n",
-    "    qm = constant_versions[i]['physical_device']['name']\n",
-    "    \n",
-    "    for key in separate_plot:\n",
-    "        const = '{}_{}'.format(const, constant_parameters[i][key])\n",
-    "        \n",
-    "    if not const in ret_constants:\n",
-    "        ret_constants[const] = {}\n",
-    "    if not qm in ret_constants[const]:\n",
-    "            ret_constants[const][qm] = []\n",
-    "            \n",
-    "    if nconstants>0 and len(ret_constants[const][qm])>=nconstants:\n",
-    "        continue\n",
-    "        \n",
-    "    print(\"constant: {}, module {}\".format(const,qm))\n",
-    "    constant_data.retrieve_from_version_info(constant_versions[i])\n",
-    "    \n",
-    "    cdata = constant_data.calibration_constant.data\n",
-    "    ctime = constant_data.calibration_constant_version.begin_at\n",
-    "    cdata = modify_const(const, cdata)\n",
-    "    \n",
-    "    if len(constantBP_versions)>0:\n",
-    "        constant_BP.retrieve_from_version_info(constantBP_versions[i])\n",
-    "        cdataBP = constant_BP.calibration_constant.data\n",
-    "        cdataBP = modify_const(const, cdataBP, True)\n",
-    "        \n",
-    "        if cdataBP.shape != cdata.shape:\n",
-    "            print('Wrong bad pixel shape! {}, expected {}'.format(cdataBP.shape, cdata.shape))\n",
-    "            continue\n",
-    "        \n",
-    "        # Apply bad pixel mask\n",
-    "        cdataABP = np.copy(cdata)\n",
-    "        cdataABP[cdataBP > 0] = np.nan\n",
-    "    \n",
-    "        # Create superpixels for constants with BP applied\n",
-    "        cdataABP = get_rebined(cdataABP, spShape)\n",
-    "        toStoreBP = np.nanmean(cdataABP, axis=(1, 3))\n",
-    "        toStoreBPStd = np.nanstd(cdataABP, axis=(1, 3))\n",
-    "\n",
-    "        # Prepare number of bad pixels per superpixels\n",
-    "        cdataBP = get_rebined(cdataBP, spShape)\n",
-    "        cdataNBP = np.nansum(cdataBP > 0, axis=(1, 3))\n",
-    "    else:\n",
-    "        toStoreBP = 0\n",
-    "        toStoreBPStd = 0\n",
-    "        cdataNBP = 0\n",
-    "\n",
-    "    # Create superpixels for constants without BP applied\n",
-    "    cdata = get_rebined(cdata, spShape)\n",
-    "    toStoreStd = np.nanstd(cdata, axis=(1, 3))\n",
-    "    toStore = np.nanmean(cdata, axis=(1, 3))\n",
-    "    \n",
-    "    # Convert parameters to dict\n",
-    "    dpar = {p.name: p.value for p in constant_data.detector_condition.parameters}\n",
-    "    \n",
-    "    print(\"Store values in dict\", const, qm, ctime)\n",
-    "    ret_constants[const][qm].append({'ctime': ctime,\n",
-    "                                     'nBP': cdataNBP,\n",
-    "                                     'dataBP': toStoreBP,\n",
-    "                                     'dataBPStd': toStoreBPStd,\n",
-    "                                     'data': toStore,\n",
-    "                                     'dataStd': toStoreStd,\n",
-    "                                     'mdata': dpar})  \n",
-    "    "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    print('Save data to /CalDBAna_{}_{}.h5'.format(dclass, db_module))\n",
-    "    save_dict_to_hdf5(ret_constants,\n",
-    "                      '{}/CalDBAna_{}_{}.h5'.format(out_folder, dclass, db_module))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(out_folder, dclass)\n",
-    "else:\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(use_existing, dclass)\n",
-    "\n",
-    "print('Load data from {}'.format(fpath))\n",
-    "ret_constants = load_data_from_hdf5(fpath)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Parameters for plotting\n",
-    "\n",
-    "keys = {\n",
-    "    'Mean': ['data', '', 'Mean over pixels'],\n",
-    "    'std': ['dataStd', '', '$\\sigma$ over pixels'],\n",
-    "    'MeanBP': ['dataBP', 'Good pixels only', 'Mean over pixels'],\n",
-    "    'NBP': ['nBP', 'Fraction of BP', 'Number of BP'],\n",
-    "    'stdBP': ['dataBPStd', 'Good pixels only', '$\\sigma$ over pixels'],\n",
-    "}\n",
-    "\n",
-    "gain_name = ['High', 'Medium', 'Low']"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "print('Plot calibration constants')\n",
-    "\n",
-    "# loop over constat type\n",
-    "for const, modules in ret_constants.items():\n",
-    "    \n",
-    "    const = const.split(\"_\")\n",
-    "    for gain in range(3):\n",
-    "\n",
-    "        print('Const: {}'.format(const))\n",
-    "\n",
-    "        # summary over modules\n",
-    "        mod_data = {}\n",
-    "        mod_names = []\n",
-    "        mod_times = []\n",
-    "        \n",
-    "        # Loop over modules\n",
-    "        for mod, data in modules.items():\n",
-    "            print(mod)\n",
-    "\n",
-    "            ctimes = np.array(data[\"ctime\"])\n",
-    "            ctimes_ticks = [x.strftime('%y-%m-%d') for x in ctimes]\n",
-    "\n",
-    "            if (\"mdata\" in data):\n",
-    "                cmdata = np.array(data[\"mdata\"])\n",
-    "                for i, tick in enumerate(ctimes_ticks):\n",
-    "                    ctimes_ticks[i] = ctimes_ticks[i] + \\\n",
-    "                        ', V={:1.0f}'.format(cmdata[i]['Sensor Temperature']) + \\\n",
-    "                        ', T={:1.0f}'.format(\n",
-    "                        cmdata[i]['Integration Time'])\n",
-    "\n",
-    "            sort_ind = np.argsort(ctimes_ticks)\n",
-    "            ctimes_ticks = list(np.array(ctimes_ticks)[sort_ind])\n",
-    "\n",
-    "            # Create sorted by data dataset\n",
-    "            rdata = {}\n",
-    "            for key, item in keys.items():\n",
-    "                if item[0] in data:\n",
-    "                    rdata[key] = np.array(data[item[0]])[sort_ind]\n",
-    "\n",
-    "            nTimes = rdata['Mean'].shape[0]\n",
-    "            nPixels = rdata['Mean'].shape[1] * rdata['Mean'].shape[2]\n",
-    "            nBins = nPixels\n",
-    "            \n",
-    "            # Select gain\n",
-    "            if const[0] not in [\"Gain\", \"Noise-e\"]:\n",
-    "                for key in rdata:\n",
-    "                    if len(rdata[key].shape)<5:\n",
-    "                        continue\n",
-    "                    rdata[key] = rdata[key][..., 0, gain]\n",
-    "\n",
-    "            # Avoid to low values\n",
-    "            if const[0] in [\"Noise10Hz\", \"Offset10Hz\"]:\n",
-    "                rdata['Mean'][rdata['Mean'] < 0.1] = np.nan\n",
-    "                if 'MeanBP' in rdata:\n",
-    "                    rdata['MeanBP'][rdata['MeanBP'] < 0.1] = np.nan\n",
-    "                if 'NBP' in rdata:\n",
-    "                    rdata['NBP'] = rdata['NBP'].astype(float)\n",
-    "                    rdata['NBP'][rdata['NBP'] == spShape[0]*spShape[1]] = np.nan\n",
-    "\n",
-    "            # Reshape: ASICs over cells for plotting\n",
-    "            pdata = {}\n",
-    "            for key in rdata:\n",
-    "                if len(rdata[key].shape)<3:\n",
-    "                    continue\n",
-    "                pdata[key] = rdata[key].reshape(nTimes, nBins).swapaxes(0, 1)\n",
-    "\n",
-    "            # Summary over ASICs\n",
-    "            adata = {}\n",
-    "            for key in rdata:\n",
-    "                if len(rdata[key].shape)<3:\n",
-    "                    continue\n",
-    "                adata[key] = np.nansum(rdata[key], axis=(1, 2))\n",
-    "\n",
-    "            # Summary information over modules\n",
-    "            for key in pdata:\n",
-    "                if key not in mod_data:\n",
-    "                    mod_data[key] = []\n",
-    "                if key == 'NBP':\n",
-    "                    mod_data[key].append(np.nansum(pdata[key], axis=0))\n",
-    "                else:\n",
-    "                    mod_data[key].append(np.nanmean(pdata[key], axis=0))\n",
-    "\n",
-    "            mod_names.append(mod)\n",
-    "            mod_times.append(ctimes[sort_ind])\n",
-    "            \n",
-    "            # Plotting\n",
-    "            for key in pdata:\n",
-    "                \n",
-    "                if len(pdata[key].shape)<2:\n",
-    "                    continue\n",
-    "                    \n",
-    "                vmin,vmax = get_range(pdata[key][::-1].flatten(), plot_range)\n",
-    "                if key == 'NBP':\n",
-    "                    unit = '[%]'\n",
-    "                else:\n",
-    "                    unit = '[ADU]'\n",
-    "\n",
-    "                title = '{}, module {}, {}'.format(\n",
-    "                    const[0], mod,  keys[key][1])\n",
-    "                cb_label = '{}, {} {}'.format(const[0], keys[key][2], unit)\n",
-    "\n",
-    "                fname = '{}/{}_{}'.format(out_folder, const[0], mod.replace('_', ''))\n",
-    "                for item in const[1:]:\n",
-    "                    fname = '{}_{}'.format(fname, item)\n",
-    "                fname = '{}_ASIC_{}.png'.format(fname, key)\n",
-    "                \n",
-    "                hm_combine(pdata[key][::-1], htype=HMType.mro,\n",
-    "                          x_label='Creation Time', y_label='ASIC ID',\n",
-    "                          x_ticklabels=ctimes_ticks,\n",
-    "                          x_ticks=np.arange(len(ctimes_ticks))+0.3,\n",
-    "                          title=title, cb_label=cb_label,\n",
-    "                          vmin=vmin, vmax=vmax,\n",
-    "                          fname=fname,\n",
-    "                          pad=[0.125, 0.125, 0.12, 0.185])\n",
-    "\n",
-    "                \n",
-    "        # Summary over modules\n",
-    "        for key in mod_data:\n",
-    "            \n",
-    "            if key == 'NBP':\n",
-    "                unit = ''\n",
-    "            else:\n",
-    "                unit = '[ADU]'\n",
-    "\n",
-    "            title = '{}, All modules, {} gain, {}'.format(\n",
-    "                    const[0], gain_name[gain], keys[key][1])\n",
-    "            \n",
-    "            fname = '{}/{}_{}'.format(out_folder, const[0], 'All')\n",
-    "            for item in const[1:]:\n",
-    "                fname = '{}_{}'.format(fname, item)\n",
-    "            fname = '{}_ASIC_{}.png'.format(fname, key)\n",
-    "                \n",
-    "            fig = plt.figure(figsize=(12,12) )\n",
-    "            for i in range(len(mod_data[key])):\n",
-    "                plt.scatter(mod_times[i], mod_data[key][i], label=mod_names[i])\n",
-    "            plt.grid()\n",
-    "            plt.xlabel('Creation Time')\n",
-    "            plt.ylabel('{}, {} {}'.format(const[0], keys[key][2], unit))  \n",
-    "            plt.legend(loc='best guess')\n",
-    "            plt.title(title)\n",
-    "            fig.savefig(fname)\n"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/LPD/PlotFromCalDB_LPD_NBC.ipynb b/notebooks/LPD/PlotFromCalDB_LPD_NBC.ipynb
deleted file mode 100644
index 6f797cae3..000000000
--- a/notebooks/LPD/PlotFromCalDB_LPD_NBC.ipynb
+++ /dev/null
@@ -1,673 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Statistical analysis of calibration factors#\n",
-    "\n",
-    "Author: Mikhail Karnevskiy, Steffen Hauf, Version 0.2\n",
-    "\n",
-    "Calibration constants for LPD1M detector from the data base with injection time between start_date and end_date are considered.\n",
-    "\n",
-    "To be visualized, calibration constants are averaged per ASICs. Plots shows calibration constant over time for each constant and for each module. Summary plots overall modules are created.\n",
-    "\n",
-    "In additional gain-slopes flat-field and pulse-capacitor are combined to relative-gain constant and presented as well. Noise in electron units is derived using gain factors and presented.\n",
-    "\n",
-    "Values shown in plots are saved in h5 files.\n",
-    "\n",
-    "All presented values corresponds to high and medium gain stages."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "cluster_profile = \"noDB\"  # The ipcluster profile to use\n",
-    "start_date = \"2019-01-30\"  # Date to start investigation interval from\n",
-    "end_date = \"2019-12-12\"  # Date to end investigation interval at, can be \"now\"\n",
-    "nconstants = 20 # Number of time stamps to plot. If not 0, overcome start_date.\n",
-    "constants = [\"Noise\", \"Offset\", \"SlopesFF\", \"SlopesCI\"] # constants to plot\n",
-    "modules = [2]  # Modules, set to -1 for all, range allowed\n",
-    "bias_voltages = [250, 500]  # Bias voltage\n",
-    "mem_cells = [1, 128, 512]  # Number of used memory cells.\n",
-    "photon_energy = 9.2  # Photon energy of the beam\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/karnem/test_LPD2\"  # Output folder, required\n",
-    "use_existing = \"\" # If not empty, constants stored in given folder will be used\n",
-    "cal_db_timeout = 180000 # timeout on caldb requests\",\n",
-    "adu_to_photon = 33.17 # ADU to photon conversion factor (8000 / 3.6 / 67.)\n",
-    "nMemToShow = 32 # Number of memory cells to be shown in plots over ASICs\n",
-    "db_module = \"LPD1M1\"  # detector entry in the DB to investigate\n",
-    "dclass = \"LPD\"  # Detector class\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # the database interface to use\n",
-    "max_time = 15 # the max margin in min. for the matching closest bad pixels\n",
-    "range_offset = [800., 1500, 600, 900] # plotting range for offset: high gain l, r, medium gain l, r \n",
-    "range_noise = [2.0, 16, 1.0, 7.0] # plotting range for noise: high gain l, r, medium gain l, r \n",
-    "range_gain = [20, 30, 20, 30] # plotting range for gain: high gain l, r, medium gain l, r \n",
-    "range_noise_e = [100., 600., 100., 600.] # plotting range for noise in [e-]: high gain l, r, medium gain l, r \n",
-    "range_slopesCI = [0.95, 1.05, 0.0, 0.5] # plotting range for slope CI: high gain l, r, medium gain l, r \n",
-    "range_slopesFF = [0.8, 1.2, 0.8, 1.2] # plotting range for slope FF: high gain l, r, medium gain l, r \n",
-    "plot_range = 3 # range for plotting in units of median absolute deviations\n",
-    "x_labels = ['Sensor Bias Voltage', 'Memory cells'] # parameters to be shown on X axis"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "import copy\n",
-    "import datetime\n",
-    "import dateutil.parser\n",
-    "import numpy as np\n",
-    "import os\n",
-    "import sys\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "from iCalibrationDB import Constants, Conditions, Detectors, ConstantMetaData\n",
-    "from cal_tools.tools import get_from_db, get_random_db_interface\n",
-    "from cal_tools.ana_tools import (save_dict_to_hdf5, load_data_from_hdf5, \n",
-    "                                 combine_constants, HMType, IMType,\n",
-    "                                 hm_combine, combine_lists, get_range)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Prepare variables\n",
-    "nMem = max(mem_cells) # Number of mem Cells to store\n",
-    "spShape = (64,64) # Shape of superpixel\n",
-    "\n",
-    "if modules[0] == -1:\n",
-    "    modules = range(16)\n",
-    "    \n",
-    "modules = [\"Q{}M{}\".format(x // 4 + 1, x % 4 + 1) for x in modules]\n",
-    "\n",
-    "constantsDark = {\"SlopesFF\": 'BadPixelsFF',\n",
-    "                 'SlopesCI': 'BadPixelsCI',\n",
-    "                 'Noise': 'BadPixelsDark',\n",
-    "                 'Offset': 'BadPixelsDark'}\n",
-    "print('Bad pixels data: ', constantsDark)\n",
-    "\n",
-    "# Define parameters in order to perform loop over time stamps\n",
-    "start = datetime.datetime.now() if start_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    start_date)\n",
-    "end = datetime.datetime.now() if end_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    end_date)\n",
-    "\n",
-    "# Create output folder\n",
-    "os.makedirs(out_folder, exist_ok=True)\n",
-    "\n",
-    "# Get getector conditions\n",
-    "det = getattr(Detectors, db_module)\n",
-    "dconstants = getattr(Constants, dclass)\n",
-    "\n",
-    "print('CalDB Interface: {}'.format(cal_db_interface))\n",
-    "print('Start time at: ', start)\n",
-    "print('End time at: ', end)\n",
-    "print('Modules: ', modules)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "parameter_list = combine_lists(bias_voltages, modules, mem_cells, names = ['bias_voltage', 'module', 'mem_cells'])\n",
-    "print(parameter_list)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Retrieve list of meta-data\n",
-    "constant_versions = []\n",
-    "constant_parameters = []\n",
-    "constantBP_versions = []\n",
-    "\n",
-    "# Loop over constants\n",
-    "for c, const in enumerate(constants):\n",
-    "    \n",
-    "    if use_existing != \"\":\n",
-    "        break\n",
-    "    \n",
-    "    # Loop over parameters\n",
-    "    for pars in parameter_list:\n",
-    "    \n",
-    "        if (const in [\"Offset\", \"Noise\", \"SlopesCI\"] or \"DARK\" in const.upper()):\n",
-    "            dcond = Conditions.Dark\n",
-    "            mcond = getattr(dcond, dclass)(\n",
-    "                        memory_cells=pars['mem_cells'],\n",
-    "                        bias_voltage=pars['bias_voltage'])\n",
-    "        else:\n",
-    "            dcond = Conditions.Illuminated\n",
-    "            mcond = getattr(dcond, dclass)(\n",
-    "                                memory_cells=pars['mem_cells'],\n",
-    "                                bias_voltage=pars['bias_voltage'],\n",
-    "                                photon_energy=photon_energy)\n",
-    "\n",
-    "        print('Request: ', const, 'with paramters:', pars)\n",
-    "        # Request Constant versions for given parameters and module\n",
-    "        data = get_from_db(getattr(det, pars['module']),\n",
-    "                           getattr(dconstants,\n",
-    "                                   const)(),\n",
-    "                           copy.deepcopy(mcond), None,\n",
-    "                           cal_db_interface,\n",
-    "                           creation_time=start,\n",
-    "                           verbosity=0,\n",
-    "                           timeout=cal_db_timeout,\n",
-    "                           meta_only=True,\n",
-    "                           version_info=True)\n",
-    "\n",
-    "        if not isinstance(data, list):\n",
-    "            continue\n",
-    "            \n",
-    "        # Request BP constant versions\n",
-    "        print('constantDark:', constantsDark[const], )        \n",
-    "        dataBP = get_from_db(getattr(det, pars['module']),\n",
-    "                             getattr(dconstants, \n",
-    "                                     constantsDark[const])(),\n",
-    "                             copy.deepcopy(mcond), None,\n",
-    "                             cal_db_interface,\n",
-    "                             creation_time=start,\n",
-    "                             verbosity=0,\n",
-    "                             timeout=cal_db_timeout,\n",
-    "                             meta_only=True,\n",
-    "                             version_info=True)\n",
-    "        \n",
-    "            \n",
-    "        for d in data:\n",
-    "            # print('Item: ', d)\n",
-    "            # Match proper BP constant version\n",
-    "            # and get constant version within\n",
-    "            # requested time range\n",
-    "            if d is None:\n",
-    "                print('Time or data is not found!')\n",
-    "                continue\n",
-    "\n",
-    "            dt = dateutil.parser.parse(d['begin_at'])\n",
-    "\n",
-    "            if (dt.replace(tzinfo=None) > end or \n",
-    "                (nconstants==0 and dt.replace(tzinfo=None) < start)):\n",
-    "                continue\n",
-    "                \n",
-    "            closest_BP = None\n",
-    "            closest_BPtime = None\n",
-    "            found_BPmatch = False\n",
-    "                \n",
-    "            if not isinstance(dataBP, list):\n",
-    "                dataBP = []\n",
-    "            \n",
-    "            for dBP in dataBP:\n",
-    "                if dBP is None:\n",
-    "                    print(\"Bad pixels are not found!\")\n",
-    "                    continue\n",
-    "            \n",
-    "                dt = dateutil.parser.parse(d['begin_at'])\n",
-    "                dBPt = dateutil.parser.parse(dBP['begin_at'])\n",
-    "                \n",
-    "                if dt == dBPt:\n",
-    "                    found_BPmatch = True\n",
-    "                else:\n",
-    "\n",
-    "                    if np.abs(dBPt-dt).seconds < (max_time*60):\n",
-    "                        if closest_BP is None:\n",
-    "                            closest_BP = dBP\n",
-    "                            closest_BPtime = dBPt\n",
-    "                        else:\n",
-    "                            if np.abs(dBPt-dt) < np.abs(closest_BPtime-dt):\n",
-    "                                closest_BP = dBP\n",
-    "                                closest_BPtime = dBPt\n",
-    "                    \n",
-    "                    if dataBP.index(dBP) ==  len(dataBP)-1:\n",
-    "                        if closest_BP:\n",
-    "                            dBP = closest_BP\n",
-    "                            dBPt = closest_BPtime\n",
-    "                            found_BPmatch = True\n",
-    "                        else:\n",
-    "                            print('Bad pixels are not found!')\n",
-    "                    \n",
-    "                if found_BPmatch:\n",
-    "                    print(\"Found constant {}: begin at {}\".format(const, dt))\n",
-    "                    print(\"Found bad pixels at {}\".format(dBPt))\n",
-    "                    constantBP_versions.append(dBP)\n",
-    "                    constant_versions.append(d)\n",
-    "                    constant_parameters.append(copy.deepcopy(pars))\n",
-    "                    break\n",
-    "                    \n",
-    "            if not found_BPmatch:\n",
-    "                print('Bad pixels are not matched')\n",
-    "                constantBP_versions.append(None)\n",
-    "                constant_versions.append(d)\n",
-    "                constant_parameters.append(copy.deepcopy(pars))\n",
-    "                    \n",
-    "print('Number of retrieved constants {}'.format(len(constant_versions)))"
-   ]
-  },
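The cell above pairs each retrieved constant with the bad-pixel constant whose ``begin_at`` timestamp lies closest, within a window of ``max_time`` minutes. Note that the original test ``np.abs(dBPt-dt).seconds < (max_time*60)`` relies on ``timedelta.seconds``, which wraps around at one day; comparing whole ``timedelta`` objects (or using ``total_seconds()``) avoids that pitfall. A minimal sketch of the same nearest-timestamp matching, stripped of the database plumbing (the helper name and inputs are illustrative only)::

    import datetime

    def match_closest(t, bp_times, max_minutes=30):
        """Return the index of the timestamp in bp_times closest to t,
        or None if none lies within the allowed window."""
        window = datetime.timedelta(minutes=max_minutes)
        best, best_delta = None, None
        for i, bt in enumerate(bp_times):
            delta = abs(bt - t)
            if delta <= window and (best_delta is None or delta < best_delta):
                best, best_delta = i, delta
        return best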
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def prepare_to_store(a, nMem):\n",
-    "    shape = list(a.shape[:2])+[nMem, 2]\n",
-    "    b = np.full(shape, np.nan)\n",
-    "    b[:, :, :a.shape[2]] = a[:, :, :, :2]\n",
-    "    return b\n",
-    "\n",
-    "\n",
-    "def get_rebined(a, rebin):\n",
-    "    return a.reshape(\n",
-    "                int(a.shape[0] / rebin[0]),\n",
-    "                rebin[0],\n",
-    "                int(a.shape[1] / rebin[1]),\n",
-    "                rebin[1],\n",
-    "                a.shape[2],\n",
-    "                a.shape[3])\n",
-    "\n",
-    "\n",
-    "def modify_const(const, data):\n",
-    "\n",
-    "    if const in ['SlopesFF']:\n",
-    "        data = data[..., None, None]\n",
-    "\n",
-    "    if(len(data.shape)==5):\n",
-    "        data = data[:,:,:,:,0]\n",
-    "\n",
-    "    if len(data.shape) < 4:\n",
-    "        print(data.shape, \"Unexpected shape!\")\n",
-    "\n",
-    "    if data.shape[0] != 256:\n",
-    "        data = data.swapaxes(0, 2).swapaxes(1,3).swapaxes(2,3) \n",
-    "                            \n",
-    "    return data\n",
-    "\n",
-    "\n",
-    "ret_constants = {}\n",
-    "constant_data = ConstantMetaData()\n",
-    "constant_BP = ConstantMetaData()\n",
-    "\n",
-    "# sort over begin_at\n",
-    "idxs, _ = zip(*sorted(enumerate(constant_versions), \n",
-    "                     key=lambda x: x[1]['begin_at'], reverse=True))\n",
-    "\n",
-    "for i in idxs:\n",
-    "    const = constant_versions[i]['data_set_name'].split('/')[-2]\n",
-    "    qm = constant_parameters[i]['module']\n",
-    "    \n",
-    "    if not const in ret_constants:\n",
-    "        ret_constants[const] = {}\n",
-    "    if not qm in ret_constants[const]:\n",
-    "            ret_constants[const][qm] = []\n",
-    "    \n",
-    "    if nconstants>0 and len(ret_constants[const][qm])>=nconstants:\n",
-    "        continue\n",
-    "        \n",
-    "    constant_data.retrieve_from_version_info(constant_versions[i])\n",
-    "    cdata = constant_data.calibration_constant.data\n",
-    "    ctime = constant_data.calibration_constant_version.begin_at \n",
-    "    cdata = modify_const(const, cdata)\n",
-    "    print(\"constant: {}, module {}, begin_at {}\".format(const, qm, ctime))\n",
-    "\n",
-    "    if constantBP_versions[i]:\n",
-    "        constant_BP.retrieve_from_version_info(constantBP_versions[i])\n",
-    "        cdataBP = constant_BP.calibration_constant.data\n",
-    "        cdataBP = modify_const(const, cdataBP)\n",
-    "\n",
-    "        if cdataBP.shape != cdata.shape:\n",
-    "            print('Wrong bad pixel shape! {}, expected {}'.format(cdataBP.shape, cdata.shape))\n",
-    "            cdataBP = np.full_like(cdata, -1)\n",
-    "\n",
-    "        # Apply bad pixel mask\n",
-    "        cdataABP = np.copy(cdata)\n",
-    "        cdataABP[cdataBP > 0] = np.nan\n",
-    "\n",
-    "        # Create superpixels for constants with BP applied\n",
-    "        cdataABP = get_rebined(cdataABP, spShape)\n",
-    "        toStoreBP = prepare_to_store(np.nanmean(cdataABP, axis=(1, 3)), nMem)\n",
-    "        toStoreBPStd = prepare_to_store(np.nanstd(cdataABP, axis=(1, 3)), nMem)\n",
-    "\n",
-    "        # Prepare number of bad pixels per superpixels\n",
-    "        cdataBP = get_rebined(cdataBP, spShape)\n",
-    "        cdataNBP = prepare_to_store(np.nansum(cdataBP > 0, axis=(1, 3)), nMem)\n",
-    "\n",
-    "    # Create superpixels for constants without BP applied\n",
-    "    cdata = get_rebined(cdata, spShape)\n",
-    "    toStoreStd = prepare_to_store(np.nanstd(cdata, axis=(1, 3)), nMem)\n",
-    "    toStore = prepare_to_store(np.nanmean(cdata, axis=(1, 3)), nMem)\n",
-    "    \n",
-    "    if not constantBP_versions[i]:\n",
-    "        toStoreBP = np.full_like(toStore,  IMType.NO_BPMAP.value)\n",
-    "        toStoreBPStd = np.full_like(toStore,  IMType.NO_BPMAP.value)\n",
-    "        cdataNBP = np.full_like(toStore,  IMType.NO_BPMAP.value)\n",
-    "    \n",
-    "    dpar = {p.name: p.value for p in constant_data.detector_condition.parameters}\n",
-    "\n",
-    "    print(\"Store values in dict\", const, qm, ctime)\n",
-    "    ret_constants[const][qm].append({'ctime': ctime,\n",
-    "                                     'nBP': cdataNBP,\n",
-    "                                     'dataBP': toStoreBP,\n",
-    "                                     'dataBPStd': toStoreBPStd,\n",
-    "                                     'data': toStore,\n",
-    "                                     'dataStd': toStoreStd,\n",
-    "                                     'mdata': dpar})    \n"
-   ]
-  },
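``get_rebined`` only reshapes: it splits each pixel axis into (number of superpixels, superpixel size), so that a single ``np.nanmean``/``np.nanstd`` over axes (1, 3) collapses every superpixel to one value; ``prepare_to_store`` then pads the memory-cell axis to ``nMem`` with NaN and keeps the first two gain entries. A self-contained sketch of the rebinning trick on a toy array (shapes are illustrative)::

    import numpy as np

    a = np.arange(64, dtype=float).reshape(8, 8, 1, 1)  # (x, y, cells, gains)
    sp = (4, 4)                                         # superpixel shape

    blocks = a.reshape(a.shape[0] // sp[0], sp[0],
                       a.shape[1] // sp[1], sp[1],
                       a.shape[2], a.shape[3])
    mean_per_sp = np.nanmean(blocks, axis=(1, 3))       # -> (2, 2, 1, 1)
    std_per_sp = np.nanstd(blocks, axis=(1, 3))
    print(mean_per_sp.shape)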
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    print('Save data to /CalDBAna_{}_{}.h5'.format(dclass, modules[0]))\n",
-    "    save_dict_to_hdf5(ret_constants,\n",
-    "                      '{}/CalDBAna_{}_{}.h5'.format(out_folder, dclass, modules[0]))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(out_folder, dclass)\n",
-    "else:\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(use_existing, dclass)\n",
-    "\n",
-    "print('Load data from {}'.format(fpath))\n",
-    "ret_constants = load_data_from_hdf5(fpath)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Combine FF and PC data to calculate Gain\n",
-    "# Estimate Noise in units of electrons\n",
-    "print ('Calculate Gain and Noise in electron units')\n",
-    "\n",
-    "ret_constants[\"Gain\"] = {}\n",
-    "ret_constants[\"Noise-e\"] = {}\n",
-    "for mod in list(range(16)):\n",
-    "    if (\"SlopesFF\" not in ret_constants or\n",
-    "            \"SlopesCI\" not in ret_constants):\n",
-    "        break\n",
-    "\n",
-    "    qm = \"Q{}M{}\".format(mod // 4 + 1, mod % 4 + 1)\n",
-    "    print(qm)\n",
-    "\n",
-    "    if (qm not in ret_constants[\"SlopesFF\"] or\n",
-    "            qm not in ret_constants[\"SlopesCI\"]):\n",
-    "        continue\n",
-    "\n",
-    "    ret_constants[\"Gain\"][qm] = {}\n",
-    "\n",
-    "    dataFF = ret_constants[\"SlopesFF\"][qm]\n",
-    "    dataPC = ret_constants[\"SlopesCI\"][qm]\n",
-    "\n",
-    "    if (len(dataFF) == 0 or len(dataPC) == 0):\n",
-    "        continue\n",
-    "\n",
-    "    ctimesFF = np.array(dataFF[\"ctime\"])\n",
-    "    ctimesPC = np.array(dataPC[\"ctime\"])\n",
-    "\n",
-    "    ctime, icomb = combine_constants(ctimesFF, ctimesPC)\n",
-    "\n",
-    "    cdataPC_vs_time = np.array(dataPC[\"data\"])[..., 0]\n",
-    "    cdataFF_vs_time = np.array(dataFF[\"data\"])[..., 0]\n",
-    "\n",
-    "    cdataFF_vs_time = np.nanmedian(cdataFF_vs_time, axis=3)[..., None]\n",
-    "\n",
-    "    cdataFF_vs_time /= np.nanmedian(cdataFF_vs_time, axis=(1, 2, 3))[:, None,\n",
-    "                       None, None]\n",
-    "    cdataPC_vs_time /= np.nanmedian(cdataPC_vs_time, axis=(1, 2, 3))[:, None,\n",
-    "                       None, None]\n",
-    "\n",
-    "    gain_vs_time = []\n",
-    "    for iFF, iPC in icomb:\n",
-    "        gain_vs_time.append(cdataFF_vs_time[iFF] * cdataPC_vs_time[iPC])\n",
-    "\n",
-    "    print(np.array(gain_vs_time).shape)\n",
-    "    \n",
-    "    ctime_ts = [t.timestamp() for t in ctime]\n",
-    "    \n",
-    "    ret_constants[\"Gain\"][qm][\"ctime\"] = ctime\n",
-    "    ret_constants[\"Gain\"][qm][\"data\"] = np.array(gain_vs_time)\n",
-    "    # Fill missing data for compatibility with plotting code\n",
-    "    ret_constants[\"Gain\"][qm][\"dataBP\"] = np.array(gain_vs_time)\n",
-    "    ret_constants[\"Gain\"][qm][\"nBP\"] = np.array(gain_vs_time)\n",
-    "\n",
-    "    if \"Noise\" not in ret_constants:\n",
-    "        continue\n",
-    "\n",
-    "    if qm not in ret_constants[\"Noise\"]:\n",
-    "        continue\n",
-    "\n",
-    "    dataN = ret_constants[\"Noise\"][qm]\n",
-    "    if len(dataN) == 0:\n",
-    "        continue\n",
-    "\n",
-    "    ret_constants[\"Noise-e\"][qm] = {}\n",
-    "            \n",
-    "    ctimesG = np.array(ctime)\n",
-    "    ctimesN = np.array(dataN[\"ctime\"])\n",
-    "\n",
-    "    ctime, icomb = combine_constants(ctimesG, ctimesN)\n",
-    "\n",
-    "    cdataG_vs_time = np.array(gain_vs_time)\n",
-    "    cdataN_vs_time = np.array(dataN[\"data\"])[..., 0]\n",
-    "\n",
-    "    data_vs_time = []\n",
-    "    for iG, iN in icomb:\n",
-    "        data_vs_time.append(\n",
-    "            cdataN_vs_time[iN] * adu_to_photon / cdataG_vs_time[iG])\n",
-    "\n",
-    "    print(np.array(gain_vs_time).shape)\n",
-    "    ctime_ts = [t.timestamp() for t in ctime]\n",
-    "    ret_constants[\"Noise-e\"][qm][\"ctime\"] = ctime\n",
-    "    ret_constants[\"Noise-e\"][qm][\"data\"] = np.array(data_vs_time)\n",
-    "    # Fill missing data for compatibility with plotting code\n",
-    "    ret_constants[\"Noise-e\"][qm][\"dataBP\"] = np.array(data_vs_time)\n",
-    "    ret_constants[\"Noise-e\"][qm][\"nBP\"] = np.array(data_vs_time)\n",
-    "    \n",
-    "save_dict_to_hdf5({k:v for k,v in ret_constants.items() if k in ['Gain', 'Noise-e']},\n",
-    "                  '{}/CalDBAna_{}_Gain.h5'.format(out_folder, dclass))"
-   ]
-  },
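The conversion to electron units in the cell above is a simple rescaling: the noise in ADU is multiplied by the photon conversion factor and divided by the relative gain obtained from the time-matched SlopesFF and SlopesCI constants. Schematically (values are made up; ``adu_to_photon`` comes from the notebook parameters)::

    import numpy as np

    adu_to_photon = 33.0               # illustrative conversion factor
    noise_adu = np.array([3.1, 2.9])   # noise constants in ADU
    rel_gain = np.array([1.02, 0.98])  # normalized SlopesFF * SlopesCI

    noise_e = noise_adu * adu_to_photon / rel_gain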
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Parameters for plotting\n",
-    "\n",
-    "# Define range for plotting\n",
-    "rangevals = {\n",
-    "    \"Offset\": [range_offset[0:2], range_offset[2:4]],\n",
-    "    \"Noise\": [range_noise[0:2], range_noise[2:4]],\n",
-    "    \"Gain\": [range_gain[0:2], range_gain[2:4]],\n",
-    "    \"Noise-e\": [range_noise_e[0:2], range_noise_e[2:4]],\n",
-    "    \"SlopesCI\": [range_slopesCI[0:2], range_slopesCI[2:4]],\n",
-    "    \"SlopesFF\": [range_slopesFF[0:2], range_slopesFF[2:4]]\n",
-    "}\n",
-    "\n",
-    "keys = {\n",
-    "    'Mean': ['data', '', 'Mean over pixels'],\n",
-    "    'std': ['dataStd', '', '$\\sigma$ over pixels'],\n",
-    "    'MeanBP': ['dataBP', 'Good pixels only', 'Mean over pixels'],\n",
-    "    'NBP': ['nBP', 'Fraction of BP', 'Fraction of BP'],\n",
-    "    'stdBP': ['dataBPStd', 'Good pixels only', '$\\sigma$ over pixels'],\n",
-    "    'stdASIC': ['', '', '$\\sigma$ over ASICs'],\n",
-    "    'stdCell': ['', '', '$\\sigma$ over Cells'],\n",
-    "}\n",
-    "\n",
-    "gain_name = ['High', 'Medium', 'Low']"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "print('Plot calibration constants')\n",
-    "\n",
-    "# loop over constat type\n",
-    "for const, mods in ret_constants.items():\n",
-    "\n",
-    "    # Loop over gain\n",
-    "    for gain in range(2):\n",
-    "        print('Const: {}, gain {}'.format(const, gain))\n",
-    "\n",
-    "        if const in [\"Gain\", \"Noise-e\"] and gain == 1:\n",
-    "            continue\n",
-    "        else:\n",
-    "            pass\n",
-    "\n",
-    "        # Loop over modules\n",
-    "        for mod, data in mods.items():\n",
-    "            \n",
-    "            if mod not in modules:\n",
-    "                continue\n",
-    "\n",
-    "            print(mod)\n",
-    "\n",
-    "            ctimes = np.array(data[\"ctime\"])\n",
-    "            ctimes_ticks = [x.strftime('%y-%m-%d') for x in ctimes]\n",
-    "\n",
-    "            if (\"mdata\" in data):\n",
-    "                cmdata = np.array(data[\"mdata\"])\n",
-    "                for i, tick in enumerate(ctimes_ticks):\n",
-    "                    ctimes_ticks[i] = ctimes_ticks[i] + \\\n",
-    "                        ', V={:1.0f}'.format(cmdata[i]['Sensor Bias Voltage']) + \\\n",
-    "                        ', M={:1.0f}'.format(\n",
-    "                        cmdata[i]['Memory cells'])\n",
-    "\n",
-    "            sort_ind = np.argsort(ctimes_ticks)\n",
-    "            ctimes_ticks = list(np.array(ctimes_ticks)[sort_ind])\n",
-    "\n",
-    "            # Create sorted by data dataset\n",
-    "            rdata = {}\n",
-    "            for key, item in keys.items():\n",
-    "                if item[0] in data:\n",
-    "                    rdata[key] = np.array(data[item[0]])[sort_ind]\n",
-    "\n",
-    "            nTimes = rdata['Mean'].shape[0]\n",
-    "            nPixels = rdata['Mean'].shape[1] * rdata['Mean'].shape[2]\n",
-    "            nBins = nMemToShow * nPixels\n",
-    "\n",
-    "            # Select gain\n",
-    "            if const not in [\"Gain\", \"Noise-e\"]:\n",
-    "                for key in rdata:\n",
-    "                    rdata[key] = rdata[key][..., gain]\n",
-    "\n",
-    "            # Avoid to low values\n",
-    "            if const in [\"Noise\", \"Offset\", \"Noise-e\"]:\n",
-    "                rdata['Mean'][rdata['Mean'] < 0.1] = np.nan\n",
-    "                if 'MeanBP' in rdata:\n",
-    "                    rdata['MeanBP'][rdata['MeanBP'] < 0.1] = np.nan\n",
-    "\n",
-    "            if 'NBP' in rdata:\n",
-    "                rdata['NBP'] = rdata['NBP'].astype(float)\n",
-    "                rdata[\"NBP\"][rdata[\"NBP\"] == (spShape[0] * spShape[1])] = np.nan\n",
-    "                rdata[\"NBP\"] = rdata[\"NBP\"] / (spShape[0] * spShape[1]) * 100\n",
-    "\n",
-    "            # Reshape: ASICs over cells for plotting\n",
-    "            pdata = {}\n",
-    "            for key in rdata:\n",
-    "                pdata[key] = rdata[key][:, :, :, :nMemToShow].reshape(\n",
-    "                    nTimes, nBins).swapaxes(0, 1)\n",
-    "\n",
-    "            # Summary over ASICs\n",
-    "            adata = {}\n",
-    "            for key in rdata:\n",
-    "                adata[key] = np.nanmean(rdata[key], axis=(1, 2)).swapaxes(0, 1)\n",
-    "\n",
-    "            # Plotting\n",
-    "            for key in pdata:\n",
-    "                vmin,vmax = get_range(pdata[key][::-1].flatten(), plot_range)\n",
-    "                if const in rangevals and key in ['Mean', 'MeanBP']:\n",
-    "                    vmin = rangevals[const][gain][0]\n",
-    "                    vmax = rangevals[const][gain][1]\n",
-    "\n",
-    "                if key == 'NBP':\n",
-    "                    unit = '[%]'\n",
-    "                else:\n",
-    "                    unit = '[ADU]'\n",
-    "                    if const == 'Noise-e':\n",
-    "                        unit = '[$e^-$]'\n",
-    "\n",
-    "                title = '{}, module {}, {} gain, {}'.format(\n",
-    "                    const, mod, gain_name[gain], keys[key][1])\n",
-    "                cb_label = '{}, {} {}'.format(const, keys[key][2], unit)\n",
-    "\n",
-    "                hm_combine(pdata[key][::-1], htype=HMType.INSET_AXIS,\n",
-    "                          x_label='Creation Time', y_label='ASIC ID',\n",
-    "                          x_ticklabels=ctimes_ticks,\n",
-    "                          x_ticks=np.arange(len(ctimes_ticks))+0.3,\n",
-    "                          title=title, cb_label=cb_label,\n",
-    "                          vmin=vmin, vmax=vmax,\n",
-    "                          fname='{}/{}_{}_g{}_ASIC_{}.png'.format(\n",
-    "                                  out_folder, const, mod, gain, key),\n",
-    "                          y_ticks=np.arange(nBins, step=nMemToShow)+16,\n",
-    "                          y_ticklabels=np.arange(nPixels)[::-1]+1,\n",
-    "                          pad=[0.125, 0.125, 0.12, 0.185])\n",
-    "\n",
-    "                hm_combine(adata[key],\n",
-    "                          x_label='Creation Time', y_label='Memory cell ID',\n",
-    "                          x_ticklabels=ctimes_ticks,\n",
-    "                          x_ticks=np.arange(len(ctimes_ticks))+0.3,\n",
-    "                          title=title, cb_label=cb_label,\n",
-    "                          fname='{}/{}_{}_g{}_MEM_{}.png'.format(\n",
-    "                                  out_folder, const, mod, gain, key),\n",
-    "                          vmin=vmin, vmax=vmax)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/notebooks/ePix/PlotFromCalDB_ePix100_NBC.ipynb b/notebooks/ePix/PlotFromCalDB_ePix100_NBC.ipynb
deleted file mode 100644
index e6163d8d4..000000000
--- a/notebooks/ePix/PlotFromCalDB_ePix100_NBC.ipynb
+++ /dev/null
@@ -1,481 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Statistical analysis of calibration factors#\n",
-    "\n",
-    "Author: Mikhail Karnevskiy, Steffen Hauf, Version 0.1\n",
-    "\n",
-    "Calibration constants for ePix100 detector from the data base with injection time between start_date and end_date are considered.\n",
-    "\n",
-    "To be visualized, calibration constants are averaged per ASICs. Plots shows calibration constant over time for each constant.\n",
-    "\n",
-    "Values shown in plots are saved in h5 files."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "cluster_profile = \"noDB\" # The ipcluster profile to use\n",
-    "start_date = \"2019-01-30\" # date to start investigation interval from\n",
-    "end_date = \"2019-05-01\" # date to end investigation interval at, can be \"now\"\n",
-    "nconstants = 10 # Number of time stamps to plot. If not 0, overcome start_date.\n",
-    "dclass=\"ePix100\" # Detector class\n",
-    "db_module = \"ePix100_M15\" # detector entry in the DB to investigate\n",
-    "constants = [\"Noise\", \"Offset\"] # constants to plot\n",
-    "bias_voltage = [200] # Bias voltage\n",
-    "temperature = [288] # Operation temperature\n",
-    "integration_time = [1, 50] # Integration time\n",
-    "in_vacuum = [0] # 0 if detector is operated in room pressure\n",
-    "parameter_names = ['bias_voltage', 'integration_time', 'temperature', 'in_vacuum'] # names of parameters\n",
-    "photon_energy = 9.2 # Photon energy of the beam\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/karnem/test_ePix/\" # output folder\n",
-    "use_existing = \"\" # If not empty, constants stored in given folder will be used\n",
-    "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # the database interface to use\n",
-    "cal_db_timeout = 180000 # timeout on caldb requests\",\n",
-    "range_offset = [1000., 2200] # plotting range for offset\n",
-    "range_noise = [1.5, 3.3] # plotting range for noise\n",
-    "plot_range = 3 # range for plotting in units of median absolute deviations"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "import copy\n",
-    "import datetime\n",
-    "import dateutil.parser\n",
-    "import numpy as np\n",
-    "import os\n",
-    "import sys\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "from iCalibrationDB import Constants, Conditions, Detectors, ConstantMetaData\n",
-    "from cal_tools.tools import get_from_db, get_random_db_interface\n",
-    "from cal_tools.ana_tools import (save_dict_to_hdf5, load_data_from_hdf5, \n",
-    "                                 HMType, hm_combine, \n",
-    "                                 combine_lists, get_range)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Prepare variables\n",
-    "spShape = (354, 96) # Shape of superpixel\n",
-    "\n",
-    "parameters = [globals()[x] for x in parameter_names]\n",
-    "\n",
-    "constantsDark = {'Noise_': 'BadPixelsDark',\n",
-    "                 'Offset_': 'BadPixelsDark'}\n",
-    "print('Bad pixels data: ', constantsDark)\n",
-    "\n",
-    "# Define parameters in order to perform loop over time stamps\n",
-    "start = datetime.datetime.now() if start_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    start_date)\n",
-    "end = datetime.datetime.now() if end_date.upper() == \"NOW\" else dateutil.parser.parse(\n",
-    "    end_date)\n",
-    "\n",
-    "# Create output folder\n",
-    "os.makedirs(out_folder, exist_ok=True)\n",
-    "\n",
-    "# Get getector conditions\n",
-    "det = getattr(Detectors, db_module)\n",
-    "dconstants = getattr(Constants, dclass)\n",
-    "\n",
-    "print('CalDB Interface: {}'.format(cal_db_interface))\n",
-    "print('Start time at: ', start)\n",
-    "print('End time at: ', end)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "parameter_list = combine_lists(*parameters, names = parameter_names)\n",
-    "print(parameter_list)"
-   ]
-  },
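``combine_lists`` expands the parameter lists into every combination, returning one dict per combination keyed by ``parameter_names``. The real implementation lives in ``cal_tools.ana_tools``; its behavior is essentially that of ``itertools.product``, as this sketch shows::

    import itertools

    def combine_lists(*lists, names):
        # One dict per combination, keyed by the given parameter names.
        return [dict(zip(names, v)) for v in itertools.product(*lists)]

    print(combine_lists([200], [1, 50],
                        names=['bias_voltage', 'integration_time']))
    # [{'bias_voltage': 200, 'integration_time': 1},
    #  {'bias_voltage': 200, 'integration_time': 50}]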
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Retrieve list of meta-data\n",
-    "constant_versions = []\n",
-    "constant_parameters = []\n",
-    "constantBP_versions = []\n",
-    "\n",
-    "# Loop over constants\n",
-    "for c, const in enumerate(constants):\n",
-    "    \n",
-    "    if use_existing != \"\":\n",
-    "        break\n",
-    "    \n",
-    "    # Loop over parameters\n",
-    "    for pars in parameter_list:\n",
-    "    \n",
-    "        if (const in [\"Offset\", \"Noise\", \"SlopesPC\"] or \"DARK\" in const.upper()):\n",
-    "            dcond = Conditions.Dark\n",
-    "            mcond = getattr(dcond, dclass)(**pars)\n",
-    "        else:\n",
-    "            dcond = Conditions.Illuminated\n",
-    "            mcond = getattr(dcond, dclass)(**pars,\n",
-    "                                photon_energy=photon_energy)\n",
-    "\n",
-    "            \n",
-    "            \n",
-    "        print('Request: ', const, 'with paramters:', pars)\n",
-    "        # Request Constant versions for given parameters and module\n",
-    "        data = get_from_db(det,\n",
-    "                           getattr(dconstants,\n",
-    "                                   const)(),\n",
-    "                           copy.deepcopy(mcond), None,\n",
-    "                           cal_db_interface,\n",
-    "                           creation_time=start,\n",
-    "                           verbosity=0,\n",
-    "                           timeout=cal_db_timeout,\n",
-    "                           meta_only=True,\n",
-    "                           version_info=True)\n",
-    "        \n",
-    "        if not isinstance(data, list):\n",
-    "                continue\n",
-    "                \n",
-    "        if const in constantsDark:\n",
-    "            # Request BP constant versions\n",
-    "            print('constantDark:', constantsDark[const], )        \n",
-    "            dataBP = get_from_db(det,\n",
-    "                                 getattr(dconstants, \n",
-    "                                         constantsDark[const])(),\n",
-    "                                 copy.deepcopy(mcond), None,\n",
-    "                                 cal_db_interface,\n",
-    "                                 creation_time=start,\n",
-    "                                 verbosity=0,\n",
-    "                                 timeout=cal_db_timeout,\n",
-    "                                 meta_only=True,\n",
-    "                                 version_info=True)\n",
-    "            \n",
-    "            if not isinstance(data, list) or not isinstance(dataBP, list):\n",
-    "                continue\n",
-    "            \n",
-    "            found_BPmatch = False\n",
-    "            for d in data:\n",
-    "                # Match proper BP constant version\n",
-    "                # and get constant version within\n",
-    "                # requested time range\n",
-    "                if d is None:\n",
-    "                    print('Time or data is not found!')\n",
-    "                    continue\n",
-    "\n",
-    "                dt = dateutil.parser.parse(d['begin_at'])\n",
-    "\n",
-    "                if (dt.replace(tzinfo=None) > end or \n",
-    "                    (nconstants==0 and dt.replace(tzinfo=None) < start)):\n",
-    "                    continue\n",
-    "\n",
-    "                closest_BP = None\n",
-    "                closest_BPtime = None\n",
-    "\n",
-    "                for dBP in dataBP:\n",
-    "                    if dBP is None:\n",
-    "                        print(\"Bad pixels are not found!\")\n",
-    "                        continue\n",
-    "\n",
-    "                    dt = dateutil.parser.parse(d['begin_at'])\n",
-    "                    dBPt = dateutil.parser.parse(dBP['begin_at'])\n",
-    "\n",
-    "                    if dt == dBPt:\n",
-    "                        found_BPmatch = True\n",
-    "                    else:\n",
-    "\n",
-    "                        if np.abs(dBPt-dt).seconds < (max_time*60):\n",
-    "                            if closest_BP is None:\n",
-    "                                closest_BP = dBP\n",
-    "                                closest_BPtime = dBPt\n",
-    "                            else:\n",
-    "                                if np.abs(dBPt-dt) < np.abs(closest_BPtime-dt):\n",
-    "                                    closest_BP = dBP\n",
-    "                                    closest_BPtime = dBPt\n",
-    "\n",
-    "                        if dataBP.index(dBP) ==  len(dataBP)-1:\n",
-    "                            if closest_BP:\n",
-    "                                dBP = closest_BP\n",
-    "                                dBPt = closest_BPtime\n",
-    "                                found_BPmatch = True\n",
-    "                            else:\n",
-    "                                print('Bad pixels are not found!')\n",
-    "\n",
-    "                    if found_BPmatch:\n",
-    "                        print(\"Found constant {}: begin at {}\".format(const, dt))\n",
-    "                        print(\"Found bad pixels at {}\".format(dBPt))\n",
-    "                        constantBP_versions.append(dBP)\n",
-    "                        constant_versions.append(d)\n",
-    "                        constant_parameters.append(copy.deepcopy(pars))\n",
-    "                        found_BPmatch = False\n",
-    "                        break\n",
-    "        else:\n",
-    "            constant_versions += data\n",
-    "            constant_parameters += [copy.deepcopy(pars)]*len(data)\n",
-    "\n",
-    "# Remove dublications\n",
-    "constant_versions_tmp = []\n",
-    "constant_parameters_tmp = []\n",
-    "constantBP_versions_tmp = []\n",
-    "for i, x in enumerate(constant_versions):\n",
-    "    if x not in constant_versions_tmp:\n",
-    "        constant_versions_tmp.append(x)\n",
-    "        constant_parameters_tmp.append(constant_parameters[i])\n",
-    "        if i<len(constantBP_versions)-1:\n",
-    "            constantBP_versions_tmp.append(constantBP_versions[i])\n",
-    "constant_versions=constant_versions_tmp\n",
-    "constantBP_versions=constantBP_versions_tmp\n",
-    "constant_parameters=constant_parameters_tmp\n",
-    "\n",
-    "print('Number of stored constant versions is {}'.format(len(constant_versions)))\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def get_rebined(a, rebin):\n",
-    "    return a[:,:,0].reshape(\n",
-    "                int(a.shape[0] / rebin[0]),\n",
-    "                rebin[0],\n",
-    "                int(a.shape[1] / rebin[1]),\n",
-    "                rebin[1])\n",
-    "    \n",
-    "def modify_const(const, data, isBP = False):\n",
-    "    return data\n",
-    "\n",
-    "ret_constants = {}\n",
-    "constand_data = ConstantMetaData()\n",
-    "constant_BP = ConstantMetaData()\n",
-    "\n",
-    "# sort over begin_at\n",
-    "idxs, _ = zip(*sorted(enumerate(constant_versions), \n",
-    "                     key=lambda x: x[1]['begin_at'], reverse=True))\n",
-    "\n",
-    "for i in idxs:\n",
-    "    const = constant_versions[i]['data_set_name'].split('/')[-2]\n",
-    "    qm = db_module\n",
-    "    \n",
-    "    if not const in ret_constants:\n",
-    "        ret_constants[const] = {}\n",
-    "    if not qm in ret_constants[const]:\n",
-    "            ret_constants[const][qm] = []\n",
-    "            \n",
-    "    if nconstants>0 and len(ret_constants[const][qm])>=nconstants:\n",
-    "        continue\n",
-    "        \n",
-    "    print(\"constant: {}, module {}\".format(const,qm))\n",
-    "    constand_data.retrieve_from_version_info(constant_versions[i])\n",
-    "    \n",
-    "    cdata = constand_data.calibration_constant.data\n",
-    "    ctime = constand_data.calibration_constant_version.begin_at\n",
-    "    \n",
-    "    cdata = modify_const(const, cdata)\n",
-    "    \n",
-    "    # Create superpixels for constants without BP applied\n",
-    "    cdata = get_rebined(cdata, spShape)\n",
-    "    toStoreStd = np.nanstd(cdata, axis=(1, 3))\n",
-    "    toStore = np.nanmean(cdata, axis=(1, 3))\n",
-    "    \n",
-    "    # Convert parameters to dict\n",
-    "    dpar = {p.name: p.value for p in constand_data.detector_condition.parameters}\n",
-    "    \n",
-    "    print(\"Store values in dict\", const, qm, ctime)\n",
-    "    ret_constants[const][qm].append({'ctime': ctime,\n",
-    "                                     'nBP': 0,\n",
-    "                                     'dataBP': 0,\n",
-    "                                     'dataBPStd': 0,\n",
-    "                                     'data': toStore,\n",
-    "                                     'dataStd': toStoreStd,\n",
-    "                                     'mdata': dpar})  \n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    print('Save data to /CalDBAna_{}_{}.h5'.format(dclass, db_module))\n",
-    "    save_dict_to_hdf5(ret_constants,\n",
-    "                      '{}/CalDBAna_{}_{}.h5'.format(out_folder, dclass, db_module))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if use_existing == \"\":\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(out_folder, dclass)\n",
-    "else:\n",
-    "    fpath = '{}/CalDBAna_{}_*.h5'.format(use_existing, dclass)\n",
-    "\n",
-    "print('Load data from {}'.format(fpath))\n",
-    "ret_constants = load_data_from_hdf5(fpath)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Parameters for plotting\n",
-    "\n",
-    "# Define range for plotting\n",
-    "rangevals = {\n",
-    "    \"OffsetEPix100\": [range_offset[0:2], range_offset[2:4]],\n",
-    "    \"NoiseEPix100\": [range_noise[0:2], range_noise[2:4]],\n",
-    "}\n",
-    "\n",
-    "keys = {\n",
-    "    'Mean': ['data', '', 'Mean over pixels'],\n",
-    "    'std': ['dataStd', '', '$\\sigma$ over pixels'],\n",
-    "    'MeanBP': ['dataBP', 'Good pixels only', 'Mean over pixels'],\n",
-    "    'NBP': ['nBP', 'Fraction of BP', 'Fraction of BP'],\n",
-    "    'stdBP': ['dataBPStd', 'Good pixels only', '$\\sigma$ over pixels'],\n",
-    "    'stdASIC': ['', '', '$\\sigma$ over ASICs'],\n",
-    "    'stdCell': ['', '', '$\\sigma$ over Cells'],\n",
-    "}\n",
-    "\n",
-    "gain_name = ['High', 'Medium', 'Low']"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "print('Plot calibration constants')\n",
-    "\n",
-    "# loop over constat type\n",
-    "for const, modules in ret_constants.items():\n",
-    "\n",
-    "        print('Const: {}'.format(const))\n",
-    "\n",
-    "        # Loop over modules\n",
-    "        for mod, data in modules.items():\n",
-    "            print(mod)\n",
-    "\n",
-    "            ctimes = np.array(data[\"ctime\"])\n",
-    "            ctimes_ticks = [x.strftime('%y-%m-%d') for x in ctimes]\n",
-    "\n",
-    "            if (\"mdata\" in data):\n",
-    "                cmdata = np.array(data[\"mdata\"])\n",
-    "                for i, tick in enumerate(ctimes_ticks):\n",
-    "                    ctimes_ticks[i] = ctimes_ticks[i] + \\\n",
-    "                        ', V={:1.0f}'.format(cmdata[i]['Sensor Temperature']) + \\\n",
-    "                        ', T={:1.0f}'.format(\n",
-    "                        cmdata[i]['Integration Time'])\n",
-    "\n",
-    "            sort_ind = np.argsort(ctimes_ticks)\n",
-    "            ctimes_ticks = list(np.array(ctimes_ticks)[sort_ind])\n",
-    "\n",
-    "            # Create sorted by data dataset\n",
-    "            rdata = {}\n",
-    "            for key, item in keys.items():\n",
-    "                if item[0] in data:\n",
-    "                    rdata[key] = np.array(data[item[0]])[sort_ind]\n",
-    "\n",
-    "            nTimes = rdata['Mean'].shape[0]\n",
-    "            nPixels = rdata['Mean'].shape[1] * rdata['Mean'].shape[2]\n",
-    "            nBins = nPixels\n",
-    "\n",
-    "            # Avoid to low values\n",
-    "            if const in [\"Noise\", \"Offset\", \"Noise-e\"]:\n",
-    "                rdata['Mean'][rdata['Mean'] < 0.1] = np.nan\n",
-    "                if 'MeanBP' in rdata:\n",
-    "                    rdata['MeanBP'][rdata['MeanBP'] < 0.1] = np.nan\n",
-    "\n",
-    "            # Reshape: ASICs over cells for plotting\n",
-    "            pdata = {}\n",
-    "            for key in rdata:\n",
-    "                if key not in ['Mean', 'std']:\n",
-    "                    continue\n",
-    "                pdata[key] = rdata[key][:, :, :].reshape(nTimes, nBins).swapaxes(0, 1)\n",
-    "\n",
-    "            # Plotting\n",
-    "            for key in pdata:\n",
-    "                \n",
-    "                if key not in ['Mean', 'std']:\n",
-    "                    continue\n",
-    "                    \n",
-    "                vmin,vmax = get_range(pdata[key][::-1].flatten(), plot_range)\n",
-    "                if const in rangevals and key in ['Mean', 'MeanBP']:\n",
-    "                    vmin = rangevals[const][0][0]\n",
-    "                    vmax = rangevals[const][0][1]\n",
-    "\n",
-    "                if key == 'NBP':\n",
-    "                    unit = '[%]'\n",
-    "                else:\n",
-    "                    unit = '[ADU]'\n",
-    "                    if const == 'Noise-e':\n",
-    "                        unit = '[$e^-$]'\n",
-    "\n",
-    "                title = '{}, module {}, {}'.format(\n",
-    "                    const, mod,  keys[key][1])\n",
-    "                cb_label = '{}, {} {}'.format(const, keys[key][2], unit)\n",
-    "\n",
-    "                hm_combine(pdata[key][::-1], htype=HMType.mro,\n",
-    "                          x_label='Creation Time', y_label='ASIC ID',\n",
-    "                          x_ticklabels=ctimes_ticks,\n",
-    "                          x_ticks=np.arange(len(ctimes_ticks))+0.3,\n",
-    "                          title=title, cb_label=cb_label,\n",
-    "                          vmin=vmin, vmax=vmax,\n",
-    "                          fname='{}/{}_{}_g{}_ASIC_{}.png'.format(\n",
-    "                                  out_folder, const, mod.replace('_', ''), 0, key),\n",
-    "                          pad=[0.125, 0.125, 0.12, 0.185])\n"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/reportservice/manual_run.py b/reportservice/manual_run.py
index d57248838..d5ad03a14 100644
--- a/reportservice/manual_run.py
+++ b/reportservice/manual_run.py
@@ -37,7 +37,7 @@ arg_parser.add_argument('--instrument', default=['all'], nargs='+',
                         help='select the requested instruments. '
                              'Default=\"all\", which can be used for selecting'
                              ' all instruments')
-arg_parser.add_argument('--gitpush', dest='Push to git', action='store_true',
+arg_parser.add_argument('--gitpush', dest='gitpush', action='store_true',
                         help='required for pushing the generated figures '
                              'to the DC git repository. Default=bool(False)')
 arg_parser.set_defaults(gitpush=False)
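The ``dest`` fix above matters because argparse exposes each option as an attribute named after ``dest``; with ``dest='Push to git'`` the value is only reachable via ``getattr``, and the ``set_defaults(gitpush=False)`` call targets a different attribute than the one the flag sets. A minimal standalone illustration::

    import argparse

    parser = argparse.ArgumentParser()
    # Broken: value only reachable via getattr(args, 'Push to git')
    # parser.add_argument('--gitpush', dest='Push to git', action='store_true')
    parser.add_argument('--gitpush', dest='gitpush', action='store_true')
    parser.set_defaults(gitpush=False)

    args = parser.parse_args(['--gitpush'])
    print(args.gitpush)  # True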
diff --git a/requirements.txt b/requirements.txt
index 90228cd8c..a42e0aeda 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,6 +17,7 @@ sklearn
 prettytable
 jupyter
 jupyter_console
-sphinx == 1.4.5
+metadata_client
+sphinx == 1.8.5
 ./cal_tools
 .
diff --git a/webservice/messages.py b/webservice/messages.py
index cd15c8148..14c5c8eb1 100644
--- a/webservice/messages.py
+++ b/webservice/messages.py
@@ -22,5 +22,7 @@ class MDC:
 class Success:
     UPLOADED_CONFIG = "SUCCESS: Uploaded config for cycle {}, proposal {}"
     START_CORRECTION = "SUCCESS: Started correction: proposal {}, run {}"
+    START_CHAR = "SUCCESS: Started dark characterization: proposal {}, run {}"
     START_CORRECTION_SIM = "SUCCESS: Started simulated correction: proposal {}, run {}"
+    START_CHAR_SIM = "SUCCESS: Started simulated dark characterization: proposal {}, run {}"
     QUEUED = "Queued proposal {}, run {} for offline calibration"
diff --git a/webservice/request_darks.py b/webservice/request_darks.py
index 1cafba1a1..866560774 100644
--- a/webservice/request_darks.py
+++ b/webservice/request_darks.py
@@ -22,7 +22,10 @@ parser.add_argument('--run-med', type=str,
 parser.add_argument('--run-low', type=str,
                     help='Run number of low gain data as an integer')
 parser.add_argument('--run', type=str, help='Run number as an integer')
-parser.add_argument('--reservation', type=str, help='Reservation to run on, default is to use configured reservations')  #noqa
+parser.add_argument('--reservation', type=str,
+                    help='Reservation to run on. Deprecated: this option '
+                         'has no effect anymore; the configured default '
+                         'reservations are always used.')  #noqa
 parser.add_argument('--bkg', action='store_true',
                     help='Background mode: exit script after requesting dark.')
 
@@ -46,8 +49,10 @@ if "run_low" in args and args["run_low"]:
 if "run" in args and args["run"]:
     parm_list += ["(\"run\", \"{}\")".format(args["run"])]
 
-if "reservation" in args and args["reservation"]:
-    parm_list += ["(\"reservation\", \"{}\")".format(args["reservation"])]
+# The reservation parameter is no longer forwarded: after the ITDM
+# changes, xcal jobs run with high priority by default.
+#if "reservation" in args and args["reservation"]:
+#    parm_list += ["(\"reservation\", \"{}\")".format(args["reservation"])]
 
 msg = "','".join(parm_list)
 socket.send("['{}']".format(msg).encode())
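For reference, the request is framed as a single string that looks like a Python list of stringified tuples, matching the ``command, *parms`` format the server loop expects. A sketch of the framing with illustrative values::

    parm_list = ['("action", "dark")', '("run", "25")']

    msg = "','".join(parm_list)
    request = "['{}']".format(msg)
    print(request)  # ['("action", "dark")','("run", "25")']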
diff --git a/webservice/serve_overview.yaml b/webservice/serve_overview.yaml
index dea953661..ed80c5182 100644
--- a/webservice/serve_overview.yaml
+++ b/webservice/serve_overview.yaml
@@ -53,4 +53,4 @@ server-config:
 
 web-service:
     job-db: ./webservice_jobs.sqlite
-    cal-config: /home/karnem/myscratch/calibration3/calibration_configurations/default.yaml
+    cal-config: /home/xcal/calibration_config/default.yaml
diff --git a/webservice/webservice.py b/webservice/webservice.py
index 9c438a46b..f2df2c70b 100644
--- a/webservice/webservice.py
+++ b/webservice/webservice.py
@@ -89,9 +89,9 @@ async def upload_config(socket, config, yaml, instrument, cycle, proposal):
     :param instrument: instrument for which the update is for
     :param cycle: the facility cylce the update is for
     :param proposal: the proposal the update is for
-    
+
     The YAML contents will be placed into a file at
-    
+
         {config.local-path}/{cycle}/{proposal}.yaml
 
     If it exists it is overwritten and then the new version is pushed to
@@ -336,7 +336,7 @@ async def copy_untouched_files(file_list, out_folder, run):
 
 async def run_correction(conn, cmd, mode, proposal, run, rid):
     """ Run a correction command
-    
+
     :param cmd: to run, should be a in list for as expected by subprocess.run
     :param mode: "prod" or "sim", in the latter case nothing will be executed
                  but the command will be logged
@@ -351,7 +351,10 @@ async def run_correction(conn, cmd, mode, proposal, run, rid):
         logging.info(" ".join(cmd))
         ret = subprocess.run(cmd, stdout=subprocess.PIPE)
         if ret.returncode == 0:
-            logging.info(Success.START_CORRECTION.format(proposal, run))
+            if "DARK" in cmd:
+                logging.info(Success.START_CHAR.format(proposal, run))
+            else:
+                logging.info(Success.START_CORRECTION.format(proposal, run))
             # enter jobs in job db
             c = conn.cursor()
             rstr = ret.stdout.decode()
@@ -364,22 +367,32 @@ async def run_correction(conn, cmd, mode, proposal, run, rid):
                                            now=datetime.now().isoformat()))
             conn.commit()
             logging.debug(" ".join(cmd))
-            return Success.START_CORRECTION.format(proposal, run)
+            if "DARK" in cmd:
+                return Success.START_CHAR.format(proposal, run)
+            else:
+                return Success.START_CORRECTION.format(proposal, run)
         else:
             logging.error(Errors.JOB_LAUNCH_FAILED.format(cmd, ret.returncode))
             return Errors.JOB_LAUNCH_FAILED.format(cmd, ret.returncode)
 
     else:
-        logging.debug(Success.START_CORRECTION_SIM.format(proposal, run))
+        if "DARK" in cmd:
+            logging.debug(Success.START_CHAR_SIM.format(proposal, run))
+        else:
+            logging.debug(Success.START_CORRECTION_SIM.format(proposal, run))
+
         logging.debug(cmd)
-        return Success.START_CORRECTION_SIM.format(proposal, run)
+        if "DARK" in cmd:
+            return Success.START_CHAR_SIM.format(proposal, run)
+        else:
+            return Success.START_CORRECTION_SIM.format(proposal, run)
 
 
 async def server_runner(config, mode):
     """ The main server loop
-    
+
     The main server loop handles remote requests via a ZMQ interface.
-    
+
     Requests are the form of ZMQ.REQuest and have the format
 
         command, *parms
@@ -646,8 +659,10 @@ async def server_runner(config, mode):
                     cmd = ["python", "-m", "xfel_calibrate.calibrate",
                            detector, "DARK", '--priority', priority]
 
-                    if req_res:
-                        cmd += ['--reservation', req_res]
+                    # The reservation parameter is no longer passed on:
+                    # after the ITDM changes, xcal runs with high priority by default
+                    #if req_res:
+                    #    cmd += ['--reservation', req_res]
 
                     run_config = []
                     for typ, run in run_mapping.items():
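The ``"DARK" in cmd`` test now appears four times in ``run_correction``; since ``cmd`` is the subprocess argument list, a plain membership test finds the action token. One way to keep the message selection in a single place (a sketch, not part of this patch; it assumes the ``Success`` class from ``webservice.messages`` is importable)::

    from webservice.messages import Success

    def success_message(cmd, proposal, run, simulated=False):
        # cmd is the argument list handed to subprocess, so membership
        # testing finds the "DARK" action token directly.
        if "DARK" in cmd:
            tmpl = Success.START_CHAR_SIM if simulated else Success.START_CHAR
        else:
            tmpl = (Success.START_CORRECTION_SIM if simulated
                    else Success.START_CORRECTION)
        return tmpl.format(proposal, run)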
diff --git a/xfel_calibrate/calibrate.py b/xfel_calibrate/calibrate.py
index ec2aa7835..0cf3139c6 100755
--- a/xfel_calibrate/calibrate.py
+++ b/xfel_calibrate/calibrate.py
@@ -532,35 +532,34 @@ def make_par_table(parms, temp_path, run_uuid):
     :param run_uuid: inset of folder name containing job output
     """
 
-    # Add space in long strings without line breakers ` ,-` to
+    # Add break points to long strings without line breakers ` ,-/` to
     # wrap them in latex
     def split_len(seq, length):
-        lbc = set(' ,-')
+        lbc = set(' ,-/')
         line = ''
         for i in range(0, len(seq), length):
             sub_line = seq[i:i + length]
-            line += sub_line
+            line += sub_line.replace('/', '/\\-')
             if not any(c in lbc for c in sub_line):
-                line += ' '
+                line += '\\-'
         return line
 
     # Prepare strings and estimate their length
     l_parms = []
     len_parms = [0, 0]
-    max_len = [30, 30]
+    max_len = [20, 20]
     for p in parms:
         name = p.name.replace('_', '-')
         if len(name) > max_len[0]:
             len_parms[0] = max_len[0]
             name = split_len(name, max_len[0])
 
-        value = str(p.value)
+        value = tex_escape(str(p.value))
         if len(value) > max_len[1]:
             len_parms[1] = max_len[1]
             value = split_len(value, max_len[1])
         if p.type is str:
             value = "``{}''".format(value)
-        value = tex_escape(value)
         comment = tex_escape(str(p.comment)[1:])
         l_parms.append([name, value, comment])
 
@@ -574,16 +573,16 @@ def make_par_table(parms, temp_path, run_uuid):
     tmpl = Template('''
                     Input of the calibration pipeline 
                     =================================
-                    
-                    .. math::
-                    
-                        \\begin{tabular}{ {% for k in p %}{{k}}{%- endfor %} } 
+
+                    .. raw:: latex
+
+                        \\begin{longtable}{ {% for k in p %}{{k}}{%- endfor %} } 
                         \hline
                         {% for k in lines %}
                         {{ k[0] }} & {{ k[1] }} & {{ k[2] }} \\\\
                         {%- endfor %}
                         \hline
-                        \end{tabular}
+                        \end{longtable}
                     ''')
 
     f_name = "{}/slurm_tmp_{}/InputParameters.rst".format(temp_path, run_uuid)
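Switching from a ``tabular`` inside a math directive to a raw ``longtable`` lets the parameter table break across pages in the rendered report. A minimal sketch of how such a template renders (assuming jinja2's ``Template``, with made-up column specs and rows)::

    from jinja2 import Template

    tmpl = Template(r'''
    .. raw:: latex

        \begin{longtable}{ {% for k in p %}{{k}}{%- endfor %} }
        \hline
        {% for k in lines %}
        {{ k[0] }} & {{ k[1] }} & {{ k[2] }} \\
        {%- endfor %}
        \hline
        \end{longtable}
    ''')

    print(tmpl.render(p=['l'] * 3,
                      lines=[['bias-voltage', '300', 'detector bias in V']]))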
-- 
GitLab