From cc3d0a3b9e569da838f007ff6d68834334356321 Mon Sep 17 00:00:00 2001
From: Kiana Setoodehnia <kiana.setoodehnia@xfel.eu>
Date: Mon, 8 Feb 2021 11:02:54 +0100
Subject: [PATCH] Corrected pnCCD Main notebooks for path to slow data, etc.

---
 cal_tools/cal_tools/pnccdlib.py               |  46 ++
 .../pnCCD/Characterize_pnCCD_Dark_NBC.ipynb   | 375 +++++++------
 notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb | 374 ++++++++-----
 notebooks/pnCCD/Correct_pnCCD_NBC.ipynb       | 517 ++++++++++--------
 4 files changed, 753 insertions(+), 559 deletions(-)
 create mode 100644 cal_tools/cal_tools/pnccdlib.py

diff --git a/cal_tools/cal_tools/pnccdlib.py b/cal_tools/cal_tools/pnccdlib.py
new file mode 100644
index 000000000..da95ed28e
--- /dev/null
+++ b/cal_tools/cal_tools/pnccdlib.py
@@ -0,0 +1,46 @@
+# Extracting slow data:
+import os
+import traceback
+from typing import Tuple
+
+import h5py
+
+
+def extract_slow_data(karabo_id: str, karabo_da_control: str,
+                      ctrl_fname: str, ctrl_path: str,
+                      mdl_ctrl_path: str,
+                      bias_voltage: float, gain: float,
+                      fix_temperature_top: float,
+                      fix_temperature_bot: float,
+                      ) -> Tuple[float, float, float, float]:
+    """
+    Extract slow data from given control paths.
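+
+    Any parameter passed in at its sentinel value (bias_voltage == 0.,
+    gain == 0.1, fix_temperature_top == 0. or fix_temperature_bot == 0.)
+    is replaced by the corresponding value read from the control (slow
+    data) file; otherwise the given value is returned unchanged.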
+    """
+    try:
+        with h5py.File(ctrl_fname, "r") as f:
+            if bias_voltage == 0.:
+                bias_voltage = abs(f[os.path.join(mdl_ctrl_path,
+                                                  "DAQ_MPOD/u0voltage/value")][0])  # noqa
+            if gain == 0.1:
+                gain = f[os.path.join(mdl_ctrl_path, 
+                                      "DAQ_GAIN/pNCCDGain/value")][0]
+            if fix_temperature_top == 0.:
+                fix_temperature_top = f[os.path.join(ctrl_path, 
+                                                     "inputA/krdg/value")][0]
+            if fix_temperature_bot == 0.:
+                fix_temperature_bot = f[os.path.join(ctrl_path,
+                                                     "inputB/krdg/value")][0]
+    except KeyError:
+        print("Error during reading slow data,"
+              " please check the given h5 path for the control parameters")
+        traceback.print_exc(limit=1)
+        print("bias voltage control h5path:",
+              os.path.join(mdl_ctrl_path, "DAQ_MPOD/u0voltage/value"))
+        print("gain control h5path:",
+              os.path.join(mdl_ctrl_path, "DAQ_GAIN/pNCCDGain/value"))
+        print("fix_temperature_top control h5path:",
+              os.path.join(ctrl_path, "inputA/krdg/value"))
+        print("fix_temperature_bot control h5path:",
+              os.path.join(ctrl_path, "inputB/krdg/value"))
+
+    return bias_voltage, gain, fix_temperature_top, fix_temperature_bot
diff --git a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
index 48c76a03a..57a98508c 100644
--- a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
+++ b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
@@ -6,11 +6,11 @@
    "source": [
     "# pnCCD Dark Characterization\n",
     "\n",
-    "Author: DET Group, modified by Kiana Setoodehnia, Version: 2.0\n",
+    "Author: DET Group, modified by Kiana Setoodehnia, Version: 4.0 (December 2020)\n",
     "\n",
     "The following notebook provides dark image analysis of the pnCCD detector. Dark characterization evaluates offset and noise of the detector and gives information about bad pixels. \n",
     "\n",
-    "On the first iteration, the offset and noise maps are generated. Initial bad pixels map is obtained based on the offset and initial noise maps. \n",
+    "On the first iteration, the offset and noise maps are generated. Initial bad pixels map is obtained based on the offset and initial noise maps. Edge pixels are also added to the bad pixels map.\n",
     "\n",
     "On the second iteration, the noise map is corrected for common mode. A second bad pixel map is generated based on the offset map and offset-and-common-mode-corrected noise map. Then, the hole in the center of the CCD is added to the second bad pixel map.\n",
     "\n",
@@ -31,12 +31,13 @@
    "outputs": [],
    "source": [
     "cluster_profile = \"noDB\"  # ipcluster profile to use\n",
-    "in_folder = \"/gpfs/exfel/exp/SQS/202002/p002714/raw\"  # input folder, required\n",
-    "out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/pnccd'  # output folder, required\n",
+    "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\"  # input folder, required\n",
+    "out_folder = '/gpfs/exfel/data/scratch/setoodeh'  # output folder, required\n",
     "sequence = 0  # sequence file to use\n",
-    "run = 281 # which run to read data from, required\n",
+    "run = 339 # which run to read data from, required\n",
     "\n",
-    "db_module = \"pnCCD_M205_M206\"\n",
+    "# Data files parameters:\n",
+    "db_module = \"pnCCD_M205_M206\" # the device name for pnCCD detector\n",
     "karabo_da = ['PNCCD01'] # data aggregators\n",
     "karabo_da_control = \"PNCCD02\" # file inset for control data\n",
     "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n",
@@ -45,7 +46,7 @@
     "h5path = '/INSTRUMENT/{}/CAL/{}:output/data/image/' # path in the HDF5 file the data is at\n",
     "h5path_ctrl = '/CONTROL/{}/CTRL/TCTRL'\n",
     "\n",
-    "# for database time derivation:\n",
+    "# Database access parameters:\n",
     "use_dir_creation_date = True  # use dir creation date as data production reference date\n",
     "cal_db_interface = \"tcp://max-exfl016:8021\"  # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
@@ -55,15 +56,16 @@
     "\n",
     "number_dark_frames = 0  # number of images to be used, if set to 0 all available images are used\n",
     "chunkSize = 100 # number of images to read per chunk\n",
-    "fix_temperature = 0.  # fix temperature in K, set to 0. to use value from slow data\n",
-    "gain = 1  # the detector's gain setting, It is later read from file and this value is overwritten\n",
-    "bias_voltage = 0. # the detector's bias voltage. set to 0. to use value from slow data.\n",
+    "fix_temperature_top = 0.  # fix temperature of top pnCCD sensor in K. Set to 0, to use the value from slow data\n",
+    "fix_temperature_bot = 0.  # fix temperature of bottom pnCCD sensor in K. Set to 0, to use the value from slow data\n",
+    "gain = 0.1  # the detector's gain setting. Set to 0.1 to use the value from the slow data\n",
+    "bias_voltage = 0. # the detector's bias voltage. set to 0. to use the value from slow data.\n",
     "integration_time = 70  # detector's integration time\n",
     "commonModeAxis = 0 # axis along which common mode will be calculated (0: along rows, 1: along columns)\n",
     "commonModeBlockSize = [512, 512] # size of the detector in pixels for common mode calculations\n",
     "sigmaNoise = 10.  # pixels whose signal value exceeds sigmaNoise*noise will be considered as cosmics and are masked\n",
-    "bad_pixel_offset_sigma = 5.  # any pixel whose offset is beyond 5 standard deviations, is a bad pixel\n",
-    "bad_pixel_noise_sigma = 5.  # any pixel whose noise is beyond 5 standard deviations, is a bad pixel\n",
+    "bad_pixel_offset_sigma = 4.  # any pixel whose offset beyond this standard deviations is a bad pixel\n",
+    "bad_pixel_noise_sigma = 4.  # any pixel whose noise beyond this standard deviations is a bad pixel\n",
     "temp_limits = 5  # temperature limits in which calibration parameters are considered equal\n",
     "\n",
     "run_parallel = True # for parallel computation\n",
@@ -95,9 +97,10 @@
     "from IPython.display import display, Markdown\n",
     "\n",
     "from cal_tools.enums import BadPixels\n",
+    "from cal_tools.pnccdlib import extract_slow_data\n",
     "from cal_tools.tools import (get_dir_creation_date, save_const_to_h5,\n",
     "                             get_random_db_interface, send_to_db)\n",
-    "from iCalibrationDB import (Constants, Conditions, Detectors, Versions)\n",
+    "from iCalibrationDB import (Conditions, Constants, Detectors, Versions)\n",
     "from iCalibrationDB.detectors import DetectorTypes\n",
     "import XFELDetAna.xfelprofiler as xprof\n",
     "profiler = xprof.Profiler()\n",
@@ -155,8 +158,8 @@
     "# Calibration Database Settings, and Some Initial Run Parameters & Paths:\n",
     "\n",
     "display(Markdown('### Initial Settings'))\n",
-    "pixels_x = 1024 # rows of the FastCCD to analyze in FS mode \n",
-    "pixels_y = 1024 # columns of the FastCCD to analyze in FS mode \n",
+    "pixels_x = 1024 # number of rows of the pnCCD\n",
+    "pixels_y = 1024 # number of columns of the pnCCD \n",
     "print(f\"pnCCD size is: {pixels_x}x{pixels_y} pixels.\")\n",
     "\n",
     "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
@@ -176,7 +179,7 @@
     "        print(f\"creation_time value error: {e}.\" \n",
     "               \"Use same format as YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00/n\")\n",
     "        creation_time = None\n",
-    "        print(\"Given creation time wont be used.\")\n",
+    "        print(\"Given creation time will not be used.\")\n",
     "else:\n",
     "    creation_time = None\n",
     "\n",
@@ -186,11 +189,11 @@
     "print(f\"Creation time: {creation_time}\")\n",
     "    \n",
     "cal_db_interface = get_random_db_interface(cal_db_interface)\n",
-    "print('Calibration database interface: {}'.format(cal_db_interface))\n",
-    "print(\"Sending constants to the calibration database: {}\".format(db_output))\n",
-    "print(\"HDF5 path to data: {}\".format(h5path))\n",
-    "print(\"Reading data from: {}\".format(filename))\n",
-    "print(\"Run number: {}\".format(run))"
+    "print(f'Calibration database interface: {cal_db_interface}')\n",
+    "print(f\"Sending constants to the calibration database: {db_output}\")\n",
+    "print(f\"HDF5 path to data: {h5path}\")\n",
+    "print(f\"Reading data from: {filename}\")\n",
+    "print(f\"Run number: {run}\")"
    ]
   },
   {
@@ -199,24 +202,17 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# extract slow data\n",
+    "# Extracting slow data:\n",
     "if karabo_da_control:\n",
     "    ctrl_fname = os.path.join(ped_dir, path_template.format(run, karabo_da_control)).format(sequence)\n",
     "    ctrl_path = h5path_ctrl.format(karabo_id)\n",
     "    mdl_ctrl_path = f\"/CONTROL/{karabo_id}/MDL/\"\n",
-    "    try:\n",
-    "        with h5py.File(ctrl_fname, \"r\") as f:\n",
-    "            if bias_voltage == 0.:\n",
-    "                bias_voltage = abs(f[os.path.join(mdl_ctrl_path, \"DAQ_MPOD/u0voltage/value\")][0])\n",
-    "            gain = f[os.path.join(mdl_ctrl_path, \"DAQ_GAIN/pNCCDGain/value\")][0]\n",
-    "            if fix_temperature == 0.:\n",
-    "                fix_temperature = f[os.path.join(ctrl_path, \"inputA/krdg/value\")][0]\n",
-    "    except KeyError:\n",
-    "        print(\"Error !!! during extracting slow data\")\n",
-    "        traceback.print_exc(limit=1)\n",
-    "        print(\"bias voltage control h5path:\", os.path.join(mdl_ctrl_path, \"DAQ_MPOD/u0voltage/value\"))\n",
-    "        print(\"gain control h5path:\", os.path.join(mdl_ctrl_path, \"DAQ_GAIN/pNCCDGain/value\"))\n",
-    "        print(\"fix_temperature control h5path:\", os.path.join(ctrl_path, \"inputA/krdg/value\"))"
+    "\n",
+    "    (bias_voltage, gain,\n",
+    "     fix_temperature_top,\n",
+    "     fix_temperature_bot) = extract_slow_data(karabo_id, karabo_da_control, ctrl_fname, ctrl_path,\n",
+    "                                              mdl_ctrl_path, bias_voltage, gain,\n",
+    "                                              fix_temperature_top, fix_temperature_bot)"
    ]
   },
   {
@@ -248,10 +244,11 @@
    "source": [
     "# Printing the Parameters Read from the Data File:\n",
     "display(Markdown('### Detector Parameters'))\n",
-    "print(f\"Bias voltage is {bias_voltage} V.\")\n",
+    "print(f\"Bias voltage is {bias_voltage:0.2f} V.\")\n",
     "print(f\"Detector gain is set to {gain}.\")\n",
-    "print(f\"Detector integration time is set to {integration_time} ms\") \n",
-    "print(f\"Using a fixed temperature of {fix_temperature} K\")\n",
+    "print(f\"Detector integration time is set to {integration_time} ms\")\n",
+    "print(f\"Top pnCCD sensor is at temperature of {fix_temperature_top:0.2f} K\")\n",
+    "print(f\"Bottom pnCCD sensor is at temperature of {fix_temperature_bot:0.2f} K\")\n",
     "print(\"Number of dark images to analyze:\", nImages) "
    ]
   },
@@ -286,7 +283,7 @@
    "source": [
     "# Calculators:\n",
     "\n",
-    "# noiseCal is a noise map calculator, which internally also produces a per-pixel mean map, i.e. an offset map:\n",
+    "# noiseCal is a noise map calculator, which internally also produces a per-pixel mean map, i.e., an offset map:\n",
     "noiseCal = xcal.NoiseCalculator(sensorSize, memoryCells, cores=cpuCores, blockSize=blockSize,\n",
     "                                runParallel=run_parallel)"
    ]
@@ -317,8 +314,8 @@
     "\n",
     "for data in reader.readChunks():\n",
     "    data = data.astype(np.float32)\n",
-    "    dx = np.count_nonzero(data, axis=(0, 1))\n",
-    "    data = data[:,:,dx != 0]\n",
+    "    dx = np.count_nonzero(data, axis=(0, 1)) \n",
+    "    data = data[:,:,dx != 0] # Getting rid of empty frames\n",
     "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will \n",
     "    # temporarily change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
     "    if data.shape[2] < chunkSize:\n",
@@ -354,13 +351,41 @@
     "print(\"Initial maps are created.\")"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# pnCCD valid gains are 1, 1/4, 1/16, 1/64, 1/256, 1/340 and 1/512:\n",
+    "valid_gains = {\n",
+    "    \"a\" : 1.0,\n",
+    "    \"b\" : 4.0,\n",
+    "    \"c\" : 16.0,\n",
+    "    \"d\" : 64.0,\n",
+    "    \"e\" : 256.0,\n",
+    "    \"f\" : 340.0,\n",
+    "    \"g\" : 512.0\n",
+    "}\n",
+    "\n",
+    "gain_k = [k for k, v in valid_gains.items() if v == gain][0]\n",
+    "if gain_k == 'a' or gain_k == 'b':\n",
+    "    xrange = (0, 200) # x-axis range for the noise histogram plots\n",
+    "    bins = 2000 # number of bins for Histogram Calculators \n",
+    "    bin_range = [-1000, 1000] # bin range for Histogram Calculators \n",
+    "else:\n",
+    "    xrange = (0, 20)\n",
+    "    bins = 100\n",
+    "    bin_range = [-50, 50] "
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "### Offset and Noise Maps prior to Common Mode Correction\n",
     "\n",
-    "In the following, the histograms of the pnCCD offset and initial noise, and the heat maps of pnCCD offset, as well as the initial uncorrected noise are plotted."
+    "In the following, the histograms of the pnCCD offset and uncorrected noise maps, and the heat maps of pnCCD offset, as well as the initial uncorrected noise are plotted."
    ]
   },
   {
@@ -375,55 +400,33 @@
    },
    "outputs": [],
    "source": [
-    "#************** OFFSET MAP HISTOGRAM ***********#\n",
-    "ho, co = np.histogram(offsetMap.flatten(), bins=2000) # ho = offset histogram; co = offset bin centers\n",
-    "\n",
-    "do = {'x': co[:-1],\n",
-    "     'y': ho,\n",
-    "     #'y_err': np.sqrt(ho[:]), Markus thinks these errors bars are not correctly calculated!\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     'label': 'Raw Pedestal (ADU)'\n",
-    "     }\n",
-    "                      \n",
-    "fig = xana.simplePlot(do, figsize='1col', aspect=1, x_label = 'Raw Pedestal (ADU)', y_label=\"Counts\", \n",
-    "                      title = 'Offset Histogram', x_range=(0, np.nanmax(offsetMap)), y_log=True)\n",
-    "\n",
-    "# Calculating mean, median and standard deviation for the above histogram:\n",
-    "mids = 0.5*(co[1:] + co[:-1])\n",
-    "mean = np.average(mids, weights=ho)\n",
-    "variance = np.average((mids - mean)**2, weights=ho)\n",
-    "std = np.sqrt(variance)\n",
-    "\n",
-    "# Table of statistics on raw signal:\n",
-    "t0 = PrettyTable()\n",
-    "t0.title = \"Statistics on Raw Pedestal (Offset)\"\n",
-    "t0.field_names = [\"Mean\", \"Standard Deviation\"]\n",
-    "t0.add_row([\"{:0.3f} (ADU)\".format(mean), \"{:0.3f} (ADU)\".format(std)])\n",
-    "print(t0,'\\n')\n",
-    "\n",
-    "#***** NOISE MAP HISTOGRAM FROM THE OFFSET CORRECTED DATA *******#\n",
-    "hn, cn = np.histogram(noiseMap.flatten(), bins=2000)  # hn = noise histogram; cn = noise bin centers\n",
-    "\n",
-    "dn = {'x': cn[:-1],\n",
-    "     'y': hn,\n",
-    "     #'y_err': np.sqrt(hn[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     'label': 'Noise (ADU)'\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(dn, figsize='1col', aspect=1, x_label = 'Raw Noise (ADU)', y_label=\"Counts\", \n",
-    "                      title = 'Noise Histogram', y_log=True, x_range=(0, 600))\n",
-    "\n",
+    "#************** HISTOGRAMS *******************#\n",
+    "\n",
+    "fig = plt.figure(figsize=(12,4))\n",
+    "ax = fig.add_subplot(121)\n",
+    "xana.histPlot(ax, offsetMap.flatten(), bins=2000, plot_errors=False)\n",
+    "t = ax.set_xlabel(\"ADU per 2000 bins\")\n",
+    "t = ax.set_ylabel(\"Counts\")\n",
+    "t = ax.set_title(\"Histogram of Offset Map\")\n",
+    "t = ax.set_xlim(6000, 15000)\n",
+    "\n",
+    "fig = plt.figure(figsize=(12,4))\n",
+    "ax = fig.add_subplot(121)\n",
+    "xana.histPlot(ax, noiseMap.flatten(), bins=2000, plot_errors=False)\n",
+    "t = ax.set_xlabel(\"ADU per 2000 bins\")\n",
+    "t = ax.set_ylabel(\"Counts\")\n",
+    "t = ax.set_title(\"Histogram of Uncorrected Noise Map\")\n",
+    "t = ax.set_xlim(xrange)\n",
     "\n",
     "#************** HEAT MAPS *******************#\n",
+    "\n",
     "fig = xana.heatmapPlot(offsetMap[:,:,0], x_label='Column Number', y_label='Row Number',  aspect=1,\n",
-    "                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=6000, vmax=14500, lut_label='Offset (ADU)', \n",
-    "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', title = 'Offset Map')\n",
+    "                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=6000, vmax=15000, \n",
+    "                       lut_label='Offset (ADU)', panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',\n",
+    "                       title = 'Offset Map')\n",
     "\n",
     "fig = xana.heatmapPlot(noiseMap[:,:,0], x_label='Column Number', y_label='Row Number', aspect=1,\n",
-    "                       lut_label='Noise (ADU)', x_range=(0, pixels_y),\n",
+    "                       lut_label='Uncorrected Noise (ADU)', x_range=(0, pixels_y),\n",
     "                       y_range=(0, pixels_x), vmax=2*np.mean(noiseMap), panel_x_label='Row Stat (ADU)', \n",
     "                       panel_y_label='Column Stat (ADU)', title = 'Uncorrected NoiseMap')"
    ]
@@ -454,11 +457,16 @@
     "bad_pixels[(noiseMap < mnnoise-bad_pixel_noise_sigma*stdnoise) |\n",
     "           (noiseMap > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
     "\n",
+    "# Edge pixels are marked as bad pixels:\n",
+    "EDGE_PIXELS = 1000  # 1000 is just a constant chosen to identify the edge pixels that are marked as bad pixels.\n",
+    "bad_pixels[0,:] = EDGE_PIXELS \n",
+    "bad_pixels[1023,:] = EDGE_PIXELS \n",
+    "bad_pixels[:,0] = EDGE_PIXELS \n",
+    "bad_pixels[:,1023] = EDGE_PIXELS  \n",
+    "\n",
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Bad Pixel Value (ADU)',\n",
-    "                       x_range=(0, pixels_y),\n",
-    "                       y_range=(0, pixels_x), vmin=0, vmax=32,\n",
+    "                       x_label='Columns', y_label='Rows', lut_label='Bad Pixel Value (ADU)',\n",
+    "                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0, vmax=32,\n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                       title = 'Initial Bad Pixels Map')"
    ]
@@ -474,10 +482,14 @@
     "offsetCorrection = xcal.OffsetCorrection(sensorSize, offsetMap, nCells = memoryCells, cores=cpuCores, gains=None,\n",
     "                                         runParallel=run_parallel, blockSize=blockSize)\n",
     "\n",
-    "\n",
     "# Common Mode Correction:\n",
-    "# In this method, the median of all pixels that are read out at the same time along a row is subtracted from the\n",
-    "# signal in each pixel:\n",
+    "# In this method, the median of all (assuming stride = 1) pixels that are read out at the same time along a column \n",
+    "# is subtracted from the signal in each pixel in that column. Here, the signals in the pixels refer to the value in \n",
+    "# ADU per pixel after offset correction. The minFrac parameter is used to reject those common modes calculated with\n",
+    "# too few pixels without events, i.e., if the ratio of the number of usuable pixels to the total number of pixels \n",
+    "# in a column per quadrant is smaller than the value set for minFrac parameter for a particular column, that column \n",
+    "# will be ignored for calculation of common mode values and that column is not corrected for common mode.\n",
+    "# minFrac = 0 means no column is ignored except those containing nan values (bad pixels):\n",
     "cmCorrection = xcal.CommonModeCorrection(sensorSize,\n",
     "                                         commonModeBlockSize,\n",
     "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=1,\n",
@@ -497,10 +509,10 @@
     "# negative bin to ensure the offset and common mode corrections actually move the signal to zero:\n",
     "\n",
     "# For offset corrected data:\n",
-    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=1100, range=[-50, 1050], memoryCells=memoryCells,\n",
+    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=bins, range=bin_range, memoryCells=memoryCells,\n",
     "                                            cores=cpuCores, gains=None, blockSize=blockSize)\n",
     "# For common mode corrected data:\n",
-    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=1100, range=[-50, 1050], memoryCells=memoryCells,\n",
+    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=bins, range=bin_range, memoryCells=memoryCells,\n",
     "                                              cores=cpuCores, gains=None, blockSize=blockSize)"
    ]
   },
@@ -510,7 +522,7 @@
    "source": [
     "### Second Iteration\n",
     "\n",
-    "During the second iteration, the data are offset and common mode corrected to produce an offset-and-common-mode-corrected noise map. The common mode correction is calculated by subtracting out the median of all pixels that are read out at the same time along a row."
+    "During the second iteration, the data are offset and common mode corrected to produce an offset-and-common-mode-corrected noise map. The common mode correction is calculated by subtracting out the median of all pixels that are read out at the same time along a column (assuming stride = 1, see above code)."
    ]
   },
   {
@@ -602,22 +614,22 @@
     "     'y': hCM,\n",
     "     'drawstyle': 'steps-post',\n",
     "     'color': 'red',\n",
-    "     'label': 'Common Mode Corrected Signal'\n",
+    "     'label': 'Offset and Common Mode Corrected Signal'\n",
     "     }]\n",
     "      \n",
-    "fig = xana.simplePlot(do, figsize='2col', aspect=1, x_label = 'Corrected Pedestal (ADU)', y_label=\"Counts\", \n",
-    "                      x_range = (-50, 1050), y_log=True, legend='top-right-frame-1col', \n",
-    "                      title = 'Corrected Pedestal - 2nd Iteration') \n",
+    "fig = xana.simplePlot(do, figsize='2col', aspect=1, x_label = 'ADU', y_label=\"Counts\", \n",
+    "                      x_range = bin_range, y_log=True, legend='top-right-frame-1col', \n",
+    "                      title = 'Dark Pedestal After Offset and Common Mode Corrections - 2nd Iteration') \n",
     "\n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Comparison of the First Round of Corrections - Bad Pixels Not Excluded\"\n",
-    "t0.field_names = [\"After Offset Correction\", \"After Common Mode Correction\"]\n",
-    "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.mean(offset_corr_data)), \"Mean: {:0.3f} (ADU)\"\n",
+    "t0.field_names = [\"Dark Pedestal After Offset Correction\", \"Dark Pedestal After Offset and Common Mode Corrections\"]\n",
+    "t0.add_row([\"Mean: {:0.3f} ADU\".format(np.mean(offset_corr_data)), \"Mean: {:0.3f} ADU\"\n",
     "            .format(np.mean(data))])\n",
-    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.median(offset_corr_data)), \"Median: {:0.3f} (ADU)\"\n",
+    "t0.add_row([\"Median: {:0.3f} ADU\".format(np.median(offset_corr_data)), \"Median: {:0.3f} ADU\"\n",
     "            .format(np.median(data))])\n",
-    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.std(offset_corr_data)), \n",
-    "            \"Standard Deviation: {:0.3f} (ADU)\"\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} ADU\".format(np.std(offset_corr_data)), \n",
+    "            \"Standard Deviation: {:0.3f} ADU\"\n",
     "            .format(np.std(data))])\n",
     "print(t0,'\\n')"
    ]
@@ -626,9 +638,9 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Noise Map after Common Mode Correction\n",
+    "### Noise Map after Offset and Common Mode Correction\n",
     "\n",
-    "In the following, the effect of common mode correction on the noise is shown. Finally common mode corrected noise map (noiseMapCM) is displayed and compared to the initial uncorrected noise map."
+    "In the following, the effect of offset and common mode corrections on the noise is shown. Finally, the corrected noise map (noiseMapCM) is displayed."
    ]
   },
   {
@@ -637,33 +649,22 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "#***** NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA *******#\n",
-    "hn, cn = np.histogram(noiseMap.flatten(), bins=2000, range=(0, 600))\n",
-    "hn_CM, cn_CM = np.histogram(noiseMapCM.flatten(), bins=2000, range=(0, 600))\n",
-    "\n",
-    "dn = [{'x': cn[:-1],\n",
-    "       'y': hn,\n",
-    "       'drawstyle': 'steps-post',\n",
-    "       'color': 'blue',\n",
-    "       'label': 'Uncorrected'\n",
-    "       },\n",
-    "      {'x': cn_CM[:-1],\n",
-    "       'y': hn_CM,\n",
-    "       'drawstyle': 'steps-post',  # 'bars',\n",
-    "       'color': 'crimson',  # 'red',#'cornflowerblue',\n",
-    "       'label': 'Common Mode Corrected'\n",
-    "       }]\n",
-    "fig = xana.simplePlot(dn, figsize='1col', aspect=1, x_label='Noise (ADU)', y_label=\"Counts\",\n",
-    "                      x_range=(0, 600), y_log=True, legend='top-center-frame-1col',\n",
-    "                      title='Noise Comparison')\n",
-    "\n",
+    "#***** HISTOGRAM OF CORRECTED NOISE MAP *******#\n",
+    "fig = plt.figure(figsize=(12,4))\n",
+    "ax = fig.add_subplot(122)\n",
+    "xana.histPlot(ax, noiseMapCM.flatten(), bins=2000, plot_errors=False)\n",
+    "t = ax.set_xlabel(\"ADU per 2000 bins\")\n",
+    "t = ax.set_ylabel(\"Counts\")\n",
+    "t = ax.set_title(\"Histogram of the Noise Map After Offset and \\n Common Mode Corrections\")\n",
+    "t = ax.set_xlim(xrange)\n",
     "\n",
     "fig = xana.heatmapPlot(noiseMapCM[:, :, 0],\n",
     "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Common Mode Corrected Noise (ADU)',\n",
+    "                       lut_label='Corrected Noise (ADU)',\n",
     "                       x_range=(0, pixels_y),\n",
-    "                       y_range=(0, pixels_x), vmax=2*np.mean(noiseMapCM), panel_x_label='Row Stat (ADU)', \n",
-    "                       panel_y_label='Column Stat (ADU)', title='Common Mode Corrected Noise Map')"
+    "                       y_range=(0, pixels_x), vmax=2*np.mean(noiseMap), panel_x_label='Row Stat (ADU)', \n",
+    "                       panel_y_label='Column Stat (ADU)', \n",
+    "                       title='Noise Map After Offset and Common Mode Corrections')"
    ]
   },
   {
@@ -675,8 +676,7 @@
     "# Resetting the calculators:\n",
     "noiseCal.reset() # resetting noise calculator\n",
     "histCalCorrected.reset() # resetting histogram calculator\n",
-    "histCalCMCorrected.reset() # resetting histogram calculator\n",
-    "cmCorrection.reset()"
+    "histCalCMCorrected.reset() # resetting histogram calculator"
    ]
   },
   {
@@ -702,15 +702,7 @@
     "mnnoise = np.nanmedian(noiseMapCM)\n",
     "stdnoise = np.nanstd(noiseMapCM)\n",
     "bad_pixels[(noiseMapCM < mnnoise-bad_pixel_noise_sigma*stdnoise) |\n",
-    "           (noiseMapCM > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
-    "\n",
-    "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Bad Pixel Value (ADU)',\n",
-    "                       x_range=(0, pixels_y),\n",
-    "                       y_range=(0, pixels_x), vmin=0, vmax=32,\n",
-    "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
-    "                       title = 'Second Bad Pixels Map')"
+    "           (noiseMapCM > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value"
    ]
   },
   {
@@ -728,8 +720,10 @@
    },
    "outputs": [],
    "source": [
+    "# Each pnCCD sensor has 22 rows and 60 columns cut in the middle of it out by a laser:\n",
+    "\n",
     "hole_mask = np.zeros(bad_pixels.shape, np.uint32) \n",
-    "hole_mask[483:539,477:543,:] = BadPixels.NON_SENSITIVE.value\n",
+    "hole_mask[489:533,481:541,:] = BadPixels.NON_SENSITIVE.value\n",
     "\n",
     "# Assigning this masked area as bad pixels:\n",
     "bad_pixels = np.bitwise_or(bad_pixels, hole_mask)\n",
@@ -748,7 +742,7 @@
    "source": [
     "### Third Iteration\n",
     "\n",
-    "During the third iteration, the last bad pixel map is applied to the data. Bad pixels are masked. Possible cosmic ray events are also masked. Offset and common mode corrections are applied once again to the data, which now have bad pixdels excluded, to produce a newly corrected noise map."
+    "During the third iteration, the last bad pixel map is applied to the data. Bad pixels are masked. Possible cosmic ray events are also masked. Offset and common mode corrections are applied once again to the data, which now have bad pixdels and possible cosmic events excluded, to produce a newly corrected noise map."
    ]
   },
   {
@@ -761,6 +755,19 @@
     "noiseCal.setBadPixelMask(bad_pixels != 0) # setting bad pixels map for the noise calculator"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "cmCorrection = xcal.CommonModeCorrection(sensorSize,\n",
+    "                                         commonModeBlockSize,\n",
+    "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=1,\n",
+    "                                         noiseMap=noiseMapCM.astype(np.float32), minFrac=0)\n",
+    "cmCorrection.debug()"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -794,8 +801,8 @@
     "    cellTable=np.zeros(data_copy.shape[2], np.int32)\n",
     "    data_copy = cmCorrection.correct(data_copy.astype(np.float32), cellTable=cellTable)\n",
     "    data[data_copy > event_threshold] = np.nan # discarding events caused by cosmic rays\n",
-    "    data = np.ma.MaskedArray(data, np.isnan(data), fill_value=0) # masking cosmics, default fill_value = 1e+20 \n",
-    "\n",
+    "    #data = np.ma.MaskedArray(data, np.isnan(data), fill_value=np.nan) # masking cosmics,default fill_value = 1e+20\n",
+    "    \n",
     "    data -= offsetMap.data # Offset correction\n",
     "    offset_corr_data2 = copy.copy(data) # I am copying this so that I can have access to it in the table below\n",
     "    histCalCorrected.fill(data)\n",
@@ -815,7 +822,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "noiseMapCM_2nd = noiseCal.get().filled(0) # the masked pixels are filled with 0\n",
+    "noiseMapCM_2nd = noiseCal.get().filled(np.nan) # the masked pixels are filled with nans\n",
     "ho2, eo2, co2, so2 = histCalCorrected.get()\n",
     "hCM2, eCM2, cCM2, sCM2 = histCalCMCorrected.get()"
    ]
@@ -839,41 +846,42 @@
     "     'y': ho_second_trial,\n",
     "     'drawstyle': 'steps-post',\n",
     "     'color': 'blue',\n",
-    "     'label': 'Offset Correction, Bad Pixels Included - 2nd Trial'\n",
+    "     'label': 'Offset Corrected Signal (BP Incl.)'\n",
     "     },\n",
     "    {'x': cCM_second_trial,\n",
     "     'y': hCM_second_trial,\n",
     "     'drawstyle': 'steps-post',\n",
     "     'color': 'red',\n",
     "     'ecolor': 'crimson',\n",
-    "     'label': 'Common Mode Correction, Bad Pixels Included - 2nd Trial' \n",
+    "     'label': 'Offset and Common Mode Corrected Signal (BP Incl.)' \n",
     "     },\n",
     "    {'x': co2,\n",
     "     'y': ho2,\n",
     "     'drawstyle': 'steps-post',\n",
     "     'color': 'black',\n",
-    "     'label': 'Offset Correction, Bad Pixels Excluded - 3rd Trial'\n",
+    "     'label': 'Offset Corrected Signal (BP Excl.)'\n",
     "     },\n",
     "    {'x': cCM2,\n",
     "     'y': hCM2,\n",
     "     'drawstyle': 'steps-post',\n",
     "     'color': 'orange',\n",
-    "     'label': 'Common Mode Correction, Bad Pixels Excluded - 3rd Trial'\n",
+    "     'label': 'Offset and Common Mode Corrected Signal (BP Excl.)'\n",
     "     }]\n",
     "\n",
-    "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'Corrected Signal (ADU)', \n",
-    "                      y_label=\"Counts (Logarithmic Scale)\", y_log=True, x_range = (0, 1100),\n",
+    "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'ADU', \n",
+    "                      y_label=\"Counts (logarithmic scale)\", y_log=True, x_range = bin_range, \n",
+    "                      y_range = (0.02, 1e8),\n",
     "                      legend='top-right-frame-1col', title = 'Comparison of Corrections')\n",
     "\n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Comparison of the Second Round of Corrections - Bad Pixels Excluded\"\n",
-    "t0.field_names = [\"After Offset Correction\", \"After Common Mode Correction\"]\n",
-    "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.nanmean(offset_corr_data2)), \"Mean: {:0.3f} (ADU)\"\n",
+    "t0.field_names = [\"Dark Pedestal After Offset Correction\", \"Dark Pedestal After Offset and Common Mode Corrections\"]\n",
+    "t0.add_row([\"Mean: {:0.3f} ADU\".format(np.nanmean(offset_corr_data2)), \"Mean: {:0.3f} ADU\"\n",
     "            .format(np.nanmean(data))])\n",
-    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} (ADU)\"\n",
+    "t0.add_row([\"Median: {:0.3f} ADU\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} ADU\"\n",
     "            .format(np.nanmedian(data))])\n",
-    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(offset_corr_data2)), \n",
-    "            \"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(data))])\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(offset_corr_data2)), \n",
+    "            \"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(data))])\n",
     "print(t0,'\\n')"
    ]
   },
@@ -892,35 +900,19 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "#*****NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA*******#\n",
-    "hn_CM2, cn_CM2 = np.histogram(noiseMapCM_2nd.flatten(), bins=2000, range=(0, 600))\n",
-    "\n",
-    "dn2 = [{'x': cn[:-1],\n",
-    "     'y': hn,\n",
-    "     'drawstyle': 'steps-post',\n",
-    "     'color': 'blue',\n",
-    "     'label': 'Uncorrected'\n",
-    "     },\n",
-    "    {'x': cn_CM[:-1],\n",
-    "     'y': hn_CM,\n",
-    "     'drawstyle': 'steps-post',\n",
-    "     'color': 'red',\n",
-    "     'label': 'Common Mode Corrected prior to Bad Pixels Exclusion'\n",
-    "     },\n",
-    "    {'x': cn_CM2[:-1],\n",
-    "     'y': hn_CM2,\n",
-    "     'drawstyle': 'steps-post',\n",
-    "     'color': 'black',\n",
-    "     'label': 'Common Mode Corrected after Bad Pixels Exclusion'\n",
-    "     }]\n",
-    "\n",
-    "fig = xana.simplePlot(dn2, figsize='1col', aspect = 1, x_label = 'Noise (ADU)', y_label=\"Counts\", y_log=True, \n",
-    "                      legend='top-right-frame-1col', \n",
-    "                      title = 'Final Noise Comparison')\n",
+    "#***** HISTOGRAMS OF NOISE MAPS *******#\n",
+    "fig = plt.figure(figsize=(12,4))\n",
+    "ax = fig.add_subplot(121)\n",
+    "xana.histPlot(ax, noiseMapCM_2nd.flatten(), bins=2000, plot_errors=False)\n",
+    "t = ax.set_xlabel(\"ADU per 2000 bins\")\n",
+    "t = ax.set_ylabel(\"Counts\")\n",
+    "t = ax.set_title(\"Histogram of the Noise Map After Offset and Common Mode \\n Corrections and exclusion of Bad Pixels\")\n",
+    "t = ax.set_xlim(xrange)\n",
     "\n",
     "fig = xana.heatmapPlot(noiseMapCM_2nd[:,:,0], aspect=1, x_label='Column Number', y_label='Row Number',\n",
-    "                       lut_label='Noise (ADU)', x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
-    "                       title = 'Final Common Mode Corrected Noise\\n (Bad Pixels Excluded)', \n",
+    "                       lut_label='Final Corrected Noise (ADU)', x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
+    "                       vmax=2*np.mean(noiseMap),\n",
+    "                       title = 'Final Offset and Common Mode Corrected Noise Map \\n (Bad Pixels Excluded)', \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)')"
    ]
   },
@@ -971,7 +963,7 @@
     "num_bad_pixels = np.count_nonzero(bad_pixels)\n",
     "num_all_pixels = pixels_x*pixels_y\n",
     "percentage_bad_pixels = num_bad_pixels*100/num_all_pixels\n",
-    "print(\"Number of bad pixels: {:0.0f}, i.e. {:0.2f}% of all pixels\".format(num_bad_pixels, percentage_bad_pixels))"
+    "print(\"Number of bad pixels: {:0.0f}, i.e., {:0.2f}% of all pixels\".format(num_bad_pixels, percentage_bad_pixels))"
    ]
   },
   {
@@ -1009,7 +1001,7 @@
     "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
     "                                    integration_time=integration_time,\n",
     "                                    gain_setting=gain,\n",
-    "                                    temperature=fix_temperature,\n",
+    "                                    temperature=fix_temperature_top,\n",
     "                                    pixels_x=pixels_x,\n",
     "                                    pixels_y=pixels_y)\n",
     "\n",
@@ -1028,10 +1020,17 @@
     "        print(f\"Calibration constant {const_name} is stored to {out_folder}.\\n\")\n",
     "\n",
     "print(\"Constants parameter conditions are:\\n\")\n",
-    "print(f\"• bias_voltage: {bias_voltage}\\n• integration_time: {integration_time}\\n\"\n",
-    "      f\"• gain_setting: {gain}\\n• temperature: {fix_temperature}\\n\"\n",
+    "print(f\"• bias_voltage: {bias_voltage}\\n• gain_setting: {gain}\\n\"\n",
+    "      f\"• top_temperature: {fix_temperature_top}\\n• integration_time: {integration_time}\\n\"\n",
     "      f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")"
    ]
   }
  ],
  "metadata": {
diff --git a/notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb
index 6cb9c9ef1..b88e2fdf9 100644
--- a/notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb
+++ b/notebooks/pnCCD/Characterize_pnCCD_Gain.ipynb
@@ -6,9 +6,9 @@
    "source": [
     "# pnCCD Gain Characterization #\n",
     "\n",
-    "Authors: DET Group, modified by Kiana Setoodehnia on March 2020 - Version 2.0\n",
+    "Authors: DET Group, modified by Kiana Setoodehnia on December 2020 - Version 4.0\n",
     "\n",
-    "The following notebook provides gain characterization for the pnCCD. It relies on data which are previously  corrected using the Meta Data Catalog web service interface or by running the Correct_pnCCD_NBC.ipynb notebook. The corrections which are applied by the web service or the aforementioned notebook are as follows:\n",
+    "The following notebook provides gain characterization for the pnCCD. It relies on data which are previously  corrected using the Meta Data Catalog web service interface or by running the Correct_pnCCD_NBC.ipynb notebook. Prior to running this notebook, the corrections which should be applied by the web service or the aforementioned notebook are as follows:\n",
     "\n",
     "- offset correction\n",
     "- common mode correction\n",
@@ -27,21 +27,24 @@
    "outputs": [],
    "source": [
     "cluster_profile = \"noDB\" # ipcluster profile to use\n",
-    "in_folder = \"/gpfs/exfel/exp/SQS/201930/p900075/proc\" # input folder\n",
-    "out_folder = '/gpfs/exfel/exp/SQS/201930/p900075/proc' # output folder\n",
-    "run = 365 # which run to read data from\n",
+    "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\" # input folder for the raw data\n",
+    "out_folder = '/gpfs/exfel/data/scratch/setoodeh/Test' # output folder\n",
+    "run = 347 # which run to read data from\n",
     "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "\n",
+    "db_module = \"pnCCD_M205_M206\"\n",
     "karabo_da = 'PNCCD01' # data aggregators\n",
+    "karabo_da_control = \"PNCCD02\" # file inset for control data\n",
     "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n",
     "receiver_id = \"PNCCD_FMT-0\" # inset for receiver devices\n",
     "path_template = 'CORR-R{:04d}-PNCCD01-S{{:05d}}.h5' # the template to use to access data\n",
+    "path_template_ctrl = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data\n",
     "path_template_seqs = \"{}/r{:04d}/*PNCCD01-S*.h5\"\n",
     "h5path = '/INSTRUMENT/{}/CAL/{}:output/data/' # path to data in the HDF5 file \n",
+    "h5path_ctrl = '/CONTROL/{}/CTRL/TCTRL'\n",
     "\n",
-    "cpuCores = 8\n",
+    "cpuCores = 40 # specifies the number of running cpu cores\n",
     "use_dir_creation_date = True # this is needed to obtain creation time of the run\n",
-    "overwrite = True # keep this as True to not overwrite the output\n",
     "sequences_per_node = 1\n",
     "chunkSize = 100 # number of images to read per chunk\n",
     "run_parallel = True\n",
@@ -53,10 +56,11 @@
     "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.00 e.g. 2019-07-04 11:02:41.00\n",
     "\n",
     "# pnCCD parameters:\n",
-    "fix_temperature = 233.\n",
-    "gain = 1\n",
-    "bias_voltage = 300\n",
-    "integration_time = 70\n",
+    "fix_temperature_top = 0.  # fix temperature of top pnCCD sensor in K, set to 0. to use the value from slow data\n",
+    "fix_temperature_bot = 0.  # fix temperature of bottom pnCCD sensor in K, set to 0. to use the value from slow data\n",
+    "gain = 0.1  # the detector's gain setting. Set to 0, to use the value from slow data\n",
+    "bias_voltage = 0. # the detector's bias voltage. set to 0. to use the value from slow data.\n",
+    "integration_time = 70  # detector's integration time\n",
     "photon_energy = 1.6 # Al fluorescence in keV\n",
     "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n",
@@ -64,24 +68,6 @@
     "    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# On the singles spectrum (uploaded in the middle of this notebook), the ADU values correspoding to the boundaries\n",
-    "# of the first peak region are used as cti_limit_low and cti_limit_high:\n",
-    "if gain == 1:\n",
-    "    cti_limit_low = 3000 # lower limit of cti\n",
-    "    cti_limit_high = 10000 # higher limit of cti\n",
-    "    max_points = 100000 # maximum data value\n",
-    "elif gain == 64:\n",
-    "    cti_limit_low = 50 \n",
-    "    cti_limit_high = 170 \n",
-    "    max_points = 500000 "
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -93,42 +79,41 @@
    },
    "outputs": [],
    "source": [
-    "import numpy as np\n",
-    "import h5py\n",
-    "import matplotlib.pyplot as plt\n",
-    "import iminuit as im\n",
-    "from iminuit import Minuit\n",
-    "from IPython.display import display, Markdown\n",
     "import copy\n",
-    "import glob\n",
-    "import os\n",
-    "from prettytable import PrettyTable\n",
     "import datetime\n",
     "from datetime import timedelta\n",
-    "from mpl_toolkits.axes_grid1 import ImageGrid,  AxesGrid\n",
-    "from functools import partial\n",
-    "import matplotlib\n",
+    "import glob\n",
+    "import os\n",
+    "import traceback\n",
     "import warnings\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
-    "import XFELDetAna.xfelprofiler as xprof\n",
+    "from functools import partial\n",
+    "import h5py\n",
+    "import iminuit as im\n",
+    "from iminuit import Minuit\n",
+    "from IPython.display import display, Markdown\n",
+    "import matplotlib\n",
+    "%matplotlib inline\n",
+    "import matplotlib.pyplot as plt\n",
+    "from mpl_toolkits.axes_grid1 import ImageGrid,  AxesGrid\n",
+    "import numpy as np\n",
+    "from prettytable import PrettyTable\n",
     "\n",
+    "from cal_tools.pnccdlib import extract_slow_data\n",
+    "from cal_tools.tools import get_dir_creation_date, save_const_to_h5, send_to_db\n",
+    "from iCalibrationDB import (Conditions, ConstantMetaData,\n",
+    "                            Constants, Detectors, Versions)\n",
+    "from iCalibrationDB.detectors import DetectorTypes\n",
+    "import XFELDetAna.xfelprofiler as xprof\n",
     "profiler = xprof.Profiler()\n",
     "profiler.disable()\n",
     "from XFELDetAna.util import env\n",
     "env.iprofile = cluster_profile\n",
-    "\n",
     "from XFELDetAna import xfelpycaltools as xcal\n",
     "from XFELDetAna import xfelpyanatools as xana\n",
-    "from XFELDetAna.detectors.fastccd import readerh5 as fastccdreaderh5\n",
     "from XFELDetAna.plotting.util import prettyPlotting\n",
     "prettyPlotting=True\n",
-    "from XFELDetAna.xfelreaders import ChunkReader\n",
-    "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
-    "from iCalibrationDB.detectors import DetectorTypes\n",
-    "from cal_tools.tools import get_dir_creation_date, save_const_to_h5\n",
-    "\n",
-    "%matplotlib inline\n",
     "\n",
     "if sequences[0] == -1:\n",
     "    sequences = None"
@@ -157,12 +142,16 @@
     "print(file_loc)\n",
     "\n",
     "# Paths to the data:\n",
-    "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
+    "ped_dir = \"{}/r{:04d}\".format(out_folder, run)\n",
+    "ped_dir_ctrl = \"{}/r{:04d}\".format(in_folder, run)\n",
     "fp_name = path_template.format(run, karabo_da)\n",
     "fp_path = '{}/{}'.format(ped_dir, fp_name)\n",
     "h5path = h5path.format(karabo_id, receiver_id)\n",
     "print(\"HDF5 path to data: {}\\n\".format(h5path))\n",
     "\n",
+    "# Output Folder Creation:\n",
+    "os.makedirs(out_folder, exist_ok=True)\n",
+    "\n",
     "# Run's creation time:\n",
     "if creation_time:\n",
     "    try:\n",
@@ -171,7 +160,7 @@
     "        print(f\"creation_time value error: {e}.\" \n",
     "               \"Use same format as YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00/n\")\n",
     "        creation_time = None\n",
-    "        print(\"Given creation time wont be used.\")\n",
+    "        print(\"Given creation time will not be used.\")\n",
     "else:\n",
     "    creation_time = None\n",
     "\n",
@@ -198,7 +187,7 @@
     "            total_sequences += 1\n",
     "            fsequences.append(seq)\n",
     "\n",
-    "sequences = fsequences    \n",
+    "sequences = fsequences\n",
     "print(f\"This run has a total number of {total_sequences} sequences.\\n\")"
    ]
   },
@@ -219,6 +208,89 @@
     "    print(file_list[i])"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "display(Markdown('### Detector Parameters'))\n",
+    "\n",
+    "# extract slow data\n",
+    "if karabo_da_control:\n",
+    "    ctrl_fname = os.path.join(ped_dir_ctrl, path_template_ctrl.format(run, karabo_da_control)).format(sequences[0])\n",
+    "    ctrl_path = h5path_ctrl.format(karabo_id)\n",
+    "    mdl_ctrl_path = f\"/CONTROL/{karabo_id}/MDL/\"\n",
+    "\n",
+    "    (bias_voltage, gain,\n",
+    "     fix_temperature_top,\n",
+    "     fix_temperature_bot) = extract_slow_data(karabo_id, karabo_da_control, ctrl_fname, ctrl_path,\n",
+    "                                              mdl_ctrl_path, bias_voltage, gain,\n",
+    "                                              fix_temperature_top, fix_temperature_bot)\n",
+    "\n",
+    "print(f\"Bias voltage is {bias_voltage:0.2f} V.\")\n",
+    "print(f\"Detector gain is set to {int(gain)}.\")\n",
+    "print(f\"Top pnCCD sensor is at temperature of {fix_temperature_top:0.2f} K\")\n",
+    "print(f\"Bottom pnCCD sensor is at temperature of {fix_temperature_bot:0.2f} K\")\n",
+    "print(f\"Photon energy is {photon_energy} keV\") "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The ADU values correspoding to the boundaries of the first peak region are used as cti_limit_low and \n",
+    "# cti_limit_high:\n",
+    "\n",
+    "valid_gains = {\n",
+    "    \"a\" : 1.0,\n",
+    "    \"b\" : 4.0,\n",
+    "    \"c\" : 16.0,\n",
+    "    \"d\" : 64.0,\n",
+    "    \"e\" : 256.0,\n",
+    "    \"f\" : 340.0,\n",
+    "    \"g\" : 512.0\n",
+    "}\n",
+    "\n",
+    "gain_k = [k for k, v in valid_gains.items() if v == gain][0]\n",
+    "if gain_k == 'a':\n",
+    "    cti_limit_low = 300 # lower limit of cti\n",
+    "    cti_limit_high = 8200 # higher limit of cti\n",
+    "    max_points = 100000 # maximum data value\n",
+    "elif gain_k == 'b':\n",
+    "    cti_limit_low = 500 \n",
+    "    cti_limit_high = 2000 \n",
+    "    max_points = 100000 \n",
+    "elif gain_k == 'c':\n",
+    "    cti_limit_low = 100 \n",
+    "    cti_limit_high = 500 \n",
+    "    max_points = 100000 \n",
+    "elif gain_k == 'd':\n",
+    "    if bias_voltage <= 400.:\n",
+    "        cti_limit_low = 50 \n",
+    "        cti_limit_high = 150\n",
+    "        max_points = 20000  # ccoords.shape for each quadrant?\n",
+    "    elif bias_voltage > 400.:\n",
+    "        cti_limit_low = 50 \n",
+    "        cti_limit_high = 120\n",
+    "        max_points = 100000  \n",
+    "elif gain_k == 'e':\n",
+    "    if bias_voltage <= 400.:\n",
+    "        cti_limit_low = 10 \n",
+    "        cti_limit_high = 150 \n",
+    "        max_points = 8000  \n",
+    "    elif bias_voltage > 400.:\n",
+    "        cti_limit_low = 13 \n",
+    "        cti_limit_high = 45 \n",
+    "        max_points = 100000              \n",
+    "else:\n",
+    "    cti_limit_low = 13 \n",
+    "    cti_limit_high = 200 \n",
+    "    max_points = 100000 "
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -238,19 +310,6 @@
     "memoryCells = 1 # pnCCD has 1 memory cell"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Output Folder Creation:\n",
-    "out_folder = \"{}/r{:04d}\".format(out_folder, run)\n",
-    "os.makedirs(out_folder, exist_ok=True)\n",
-    "if not overwrite:\n",
-    "    raise AttributeError(\"Output path exists! Exiting\")    "
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -325,6 +384,29 @@
     "    return r"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The following construction of the 4 ccd quadrants are based on a right-handed axis which increase towards up (y), left (x) and into the page (z). We take the z-axis as beam axis, therefore, the image is constructed as if you are looking with the beam (you see what beam sees). The zero of the axis is on the bottom right corner as shown below:\n",
+    "\n",
+    "      ---------- ^ y\n",
+    "      | LR | UR  |\n",
+    "      ---------- |\n",
+    "      | LL | UL  |\n",
+    "    x <----------0  (beam going into the page)\n",
+    "    \n",
+    "To divide the array of shape (1024, 1024) into 4 arrays of shapes (512, 512), we take python's rule of axis = 0 being vertical (along rows of the matrices) and axis = 1 being horizontal (along columns of the matrices). Thus, we have:\n",
+    "\n",
+    "Convension:\n",
+    "quadrant coordinates = (a, b), (c, d), where a and b are the boundaries of the quadrant along axis = 0, and c and d are those along axis = 1:\n",
+    "\n",
+    "    . UL coordinates = (0,512), (0,512)\n",
+    "    . LL coordinates = (0,512), (512, 1024)\n",
+    "    . UR coordinates = (512, 1024), (0,512)\n",
+    "    . LR coordinates = (512,1024), (512,1024)"
+   ]
+  },
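+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A minimal sketch of the quadrant slicing convention described above. This cell\n",
+    "# is illustrative only: `demo` is a hypothetical array standing in for an image\n",
+    "# of shape (1024, 1024) and is not used elsewhere in this notebook.\n",
+    "demo = np.zeros((1024, 1024), np.float32)\n",
+    "UL = demo[0:512, 0:512]        # upper left\n",
+    "LL = demo[0:512, 512:1024]     # lower left\n",
+    "UR = demo[512:1024, 0:512]     # upper right\n",
+    "LR = demo[512:1024, 512:1024]  # lower right\n",
+    "print(UL.shape, LL.shape, UR.shape, LR.shape)"
+   ]
+  },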
   {
    "cell_type": "code",
    "execution_count": null,
@@ -449,7 +531,7 @@
     "            entry['col_coords'].append(yv[asd > 0].flatten())\n",
     "            entry['vals'].append(asd[asd > 0].flatten())\n",
     "        \n",
-    "        break\n",
+    "        #break\n",
     "\n",
     "up_doubles = np.concatenate(up_doubles, axis=0)\n",
     "down_doubles = np.concatenate(down_doubles, axis=0)\n",
@@ -468,7 +550,7 @@
    "source": [
     "### Pattern Statistics ###\n",
     "\n",
-    "Relative occurrences are normalized to non-cluster events"
+    "Relative occurrences are normalized to non-cluster events. This table probably only shows the statistics of the last sequence (not the entire run)."
    ]
   },
   {
@@ -523,13 +605,8 @@
     "ax = fig.add_subplot(111)\n",
     "ax.scatter(np.abs(up_doubles[:,0]), np.abs(up_doubles[:,1]), 0.5, alpha=0.5, color='b', label=\"up doubles\")\n",
     "ax.scatter(np.abs(down_doubles[:,1]), np.abs(down_doubles[:,0]), 0.5, alpha=0.5, color='g', label=\"down doubles\")\n",
-    "if gain == 1:\n",
-    "    ax.semilogx()\n",
-    "    ax.semilogy()\n",
-    "    ax.set_xlim(1000, 50000)\n",
-    "    ax.set_ylim(1000, 50000)\n",
-    "#ax.set_xlabel('Doubles')\n",
-    "#ax.set_ylabel('Doubles')\n",
+    "ax.set_xlabel('Up Doubles')\n",
+    "ax.set_ylabel('Down Doubles')\n",
     "ax.legend()\n",
     "plt.show()"
    ]
@@ -544,13 +621,10 @@
     "ax = fig.add_subplot(111)\n",
     "ax.scatter(np.abs(left_doubles[:,0]), np.abs(left_doubles[:,1]), 0.5, alpha=0.5, color='b', label=\"left doubles\")\n",
     "ax.scatter(np.abs(right_doubles[:,1]), np.abs(right_doubles[:,0]), 0.5, alpha=0.5, color='g', label=\"right doubles\")\n",
-    "if gain == 1:\n",
-    "    ax.semilogx()\n",
-    "    ax.semilogy()\n",
-    "    ax.set_xlim(1000, 50000)\n",
-    "    ax.set_ylim(1000, 50000)   \n",
-    "#ax.set_xlabel('Doubles')\n",
-    "#ax.set_ylabel('Doubles')\n",
+    "ax.semilogx()\n",
+    "ax.semilogy()  \n",
+    "ax.set_xlabel('Left Doubles')\n",
+    "ax.set_ylabel('Right Doubles')\n",
     "ax.legend()\n",
     "plt.show()"
    ]
@@ -678,6 +752,7 @@
     "            ccoords = entry[\"col_coords\"][:max_points]\n",
     "            avals = entry[\"vals\"][:max_points]\n",
     "            co = entry[\"coords\"]\n",
+    "            \n",
     "            idx = (avals > cti_limit_low) & (avals < cti_limit_high) & (ccoords >= limit_cols[0]) & \\\n",
     "                  (ccoords <= limit_cols[1])\n",
     "            xax = rcoords[idx]-co[2]\n",
@@ -707,9 +782,9 @@
     "\n",
     "                pparm = dict(throw_nan=False, pedantic=False, print_level=0)\n",
     "                pparm[\"m\"] = 0\n",
-    "                if gain == 1:\n",
+    "                if gain == 1.0:\n",
     "                    pparm[\"limit_m\"] = (0.9, 1)\n",
-    "                elif gain == 64:\n",
+    "                else:\n",
     "                    pparm[\"limit_m\"] = (0, 1)\n",
     "                pparm[\"b\"] = np.nanmean(zm)\n",
     "\n",
@@ -727,10 +802,8 @@
     "                    mErr[i] = max(-ferr['m']['lower'], ferr['m']['upper'])\n",
     "                    bErr[i] = max(-ferr['b']['lower'], ferr['b']['upper'])\n",
     "\n",
-    "                    # I am commenting out these two lines because they seem to reduce the number of events in each \n",
-    "                    # panel:\n",
-    "                    #zx, uidx = np.unique(zx, return_index=True)\n",
-    "                    #zm = zm[uidx]\n",
+    "                    zx, uidx = np.unique(zx, return_index=True)\n",
+    "                    zm = zm[uidx]\n",
     "\n",
     "                    yt = linFunc(zx, res[\"b\"], res[\"m\"])\n",
     "\n",
@@ -741,7 +814,7 @@
     "                                   xy=(0.2, 0.3), xycoords='axes fraction')\n",
     "                    ax.set_xlabel(\"Column Coordinate\")\n",
     "                    ax.set_ylabel(\"Singles Events (between CTI_low and CTI_high)\")\n",
-    "                    ax.set_title(\"Events are only for One Row\")\n",
+    "                    ax.set_title(\"Events Are Only Shown for One Row\")\n",
     "                    ax.set_xlim(0, 512)\n",
     "                    ax.legend()"
    ]
@@ -770,7 +843,7 @@
     "\n",
     "max_points = -1\n",
     "limit_cols = [0, 512]\n",
-    "counter = 0 # Total number of bad columns (over all quadrants) for which the fit does not converge\n",
+    "counter = 0 # Total number of bad rows (over all quadrants) for which the fit does not converge\n",
     "\n",
     "for key, entry in dstats.items():\n",
     "    \n",
@@ -803,9 +876,9 @@
     "        \n",
     "        pparm = dict(throw_nan=False, pedantic=False, print_level=0)\n",
     "        pparm[\"m\"] = 0\n",
-    "        if gain == 1:\n",
+    "        if gain == 1.0:\n",
     "            pparm[\"limit_m\"] = (0.9, 1)\n",
-    "        elif gain == 64:\n",
+    "        else:\n",
     "            pparm[\"limit_m\"] = (0, 1)\n",
     "        pparm[\"b\"] = np.nanmean(zm)\n",
     "\n",
@@ -830,11 +903,11 @@
     "        \n",
     "        else:\n",
     "            counter += 1 \n",
-    "    \n",
+    "                \n",
     "    cti = ms / bs\n",
     "    ctiErr = np.sqrt((1. / bs * mErr) ** 2 + (ms / bs ** 2 * bErr) ** 2)\n",
     "    \n",
-    "    relGain = bs/np.nanmean(bs) # I am not sure why Steffen was using median instead of mean here!!\n",
+    "    relGain = bs/np.nanmean(bs) \n",
     "    relGainErr = np.sqrt((1. / np.nanmean(bs) * bErr) ** 2 + (bs / np.nanmean(bs) ** 2 * np.std(bs)) ** 2)\n",
     "\n",
     "    entry['cti'] = cti\n",
@@ -869,10 +942,10 @@
     "    cti = entry[\"cti\"]\n",
     "    x = np.arange(cti.size)+co[2]\n",
     "    grid[i].scatter(cti, x, 1., color='k')\n",
-    "    if gain == 1:\n",
-    "        grid[i].set_xlim(1e-4, 1e-3)\n",
-    "    elif gain == 64:\n",
-    "        grid[i].set_xlim(1e-18, 1e-1)\n",
+    "    if gain == 1.0:\n",
+    "        grid[i].set_xlim(1e-5, 1e-2)\n",
+    "    else:\n",
+    "        grid[i].set_xlim(1e-8, 1e-2)\n",
     "    grid[i].semilogx()\n",
     "    grid[i].set_xlabel(\"CTI\")\n",
     "    grid[i].set_ylabel(\"Row Coordinate\")\n",
@@ -904,9 +977,9 @@
     "    cti_err = entry[\"cti_err\"]\n",
     "    x = np.arange(cti_err.size)+co[2]\n",
     "    grid[i].scatter(cti_err, x, 1., color='k')\n",
-    "    if gain == 1:\n",
-    "        grid[i].set_xlim(1e-11, 1e-8)\n",
-    "    elif gain == 64:\n",
+    "    if gain == 1.0:\n",
+    "        grid[i].set_xlim(1e-12, 1e-8)\n",
+    "    else:\n",
     "        grid[i].set_xlim(1e-10, 1e-5)\n",
     "    grid[i].semilogx()\n",
     "    grid[i].set_xlabel(\"Uncertainty in CTI\")\n",
@@ -1004,6 +1077,34 @@
     "    didnt_converge = []"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For those rows where the fit did not converge, we will set their gain to 1 and their CTI to the average value of the CTI in that quadrant."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ais = [0, 2, 1, 3]\n",
+    "ai = 0\n",
+    "for key, entry in dstats.items():\n",
+    "    i = ais[ai]\n",
+    "    cti = entry[\"cti\"]\n",
+    "    idx = np.where(np.isnan(cti))\n",
+    "    for i in idx[0]:\n",
+    "        cti[i] = np.nanmean(cti)\n",
+    "    \n",
+    "    rel_gain = entry[\"rel_gain\"] \n",
+    "    for index, element in enumerate(rel_gain):\n",
+    "        if element == 0:\n",
+    "            rel_gain[index] = 1"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -1012,7 +1113,7 @@
    },
    "outputs": [],
    "source": [
-    "display(Markdown('### CTE, CTI, and Relative Gain Maps'))\n",
+    "# Calculating CTE and gain maps:\n",
     "\n",
     "ctemap = np.zeros((sensorSize[0], sensorSize[1]))\n",
     "ctimap = np.zeros((sensorSize[0], sensorSize[1]))\n",
@@ -1031,49 +1132,53 @@
     "    else:\n",
     "        quadcte = (1-cti[:, None])**np.repeat(np.arange(512, 0, -1)[None, :], 512, axis=0)\n",
     "    \n",
-    "    quadcti = cti[:, None]   \n",
+    "    #quadcti = cti[:, None] \n",
     "    quadgain = rel_gain[:, None]\n",
     "    quadproduct = quadgain*quadcte\n",
     "    \n",
     "    ctemap[co[2]:co[3], co[0]:co[1]] = quadcte\n",
-    "    ctimap[co[2]:co[3], co[0]:co[1]] = quadcti\n",
     "    gainmap[co[2]:co[3], co[0]:co[1]] = quadgain \n",
-    "    productmap[co[2]:co[3], co[0]:co[1]] = quadproduct\n",
-    "      \n",
+    "    productmap[co[2]:co[3], co[0]:co[1]] = quadproduct"
+   ]
+  },
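+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The CTE map follows from the per-row CTI: a charge packet that undergoes $n$ transfer steps retains a fraction $(1 - \\mathrm{CTI})^{n}$ of its charge, where $n$ is the number of pixels it must traverse to reach the readout. This is why the exponent in the cell above is column dependent (e.g., running from 512 down to 1 in the branch shown)."
+   ]
+  },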
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "display(Markdown('### CTE and Relative Gain Maps'))\n",
+    "\n",
     "fig = xana.heatmapPlot(ctemap, figsize=(8, 8), x_label='Columns', y_label='Rows', \n",
     "                       lut_label='Charge Transfer Efficiency', \n",
-    "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), cmap = 'viridis',\n",
+    "                       aspect=1, cmap = 'viridis',x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
     "                       panel_x_label='Along Rows', panel_y_label='Along Columns', \n",
-    "                       title = 'CTE Map for pnCCD (Gain = {})'.format(gain))\n",
+    "                       title = 'CTE Map for pnCCD (Gain = 1/{})'.format(int(gain)))\n",
     "    \n",
-    "fig = xana.heatmapPlot(ctimap, figsize=(8, 8), x_label='Columns', y_label='Rows', \n",
-    "                       lut_label='Charge Transfer Inefficiency',\n",
-    "                       norm=matplotlib.colors.LogNorm(vmin=np.nanmin(ctimap), vmax=np.nanmax(ctimap)),\n",
-    "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
-    "                       panel_x_label='Along Rows', panel_y_label='Along Columns', \n",
-    "                       title = 'CTI Map for pnCCD (Gain = {})'.format(gain))\n",
-    "\n",
     "fig = xana.heatmapPlot(gainmap, figsize=(8, 8), x_label='Columns', y_label='Rows', \n",
     "                       lut_label='Relative Gain', vmin=0.8, vmax=1.2,\n",
     "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
     "                       panel_top_low_lim = 0.9, panel_top_high_lim = 1.1,\n",
+    "                       panel_side_low_lim = 0.9, panel_side_high_lim = 1.1,\n",
     "                       panel_x_label='Along Rows', panel_y_label='Along Columns', \n",
-    "                       title = 'Relativ Gain Map for pnCCD (Gain = {})'.format(gain))\n",
+    "                       title = 'Relative Gain Map for pnCCD (Gain = 1/{})'.format(int(gain)))\n",
     "\n",
     "fig = xana.heatmapPlot(productmap, figsize=(8, 8), x_label='Columns', y_label='Rows', \n",
     "                       lut_label='Relative Gain*CTI', \n",
     "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0.8, vmax=1.2, \n",
     "                       panel_x_label='Along Rows', panel_y_label='Along Columns', \n",
-    "                       panel_top_low_lim = 0.5, panel_top_high_lim = 1.5, panel_side_low_lim = 0.5, \n",
-    "                       panel_side_high_lim = 1.5, title = 'Relative Gain*CTE Map for pnCCD (Gain = {})'\n",
-    "                       .format(gain))"
+    "                       panel_top_low_lim = 0.9, panel_top_high_lim = 1.1, panel_side_low_lim = 0, \n",
+    "                       panel_side_high_lim = 1.5, title = 'Relative Gain*CTE Map for pnCCD (Gain = 1/{})'\n",
+    "                       .format(int(gain)))"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Sending the CTI and Relative Gain Maps to the Calibration Database"
+    "### Sending the CTE and Relative Gain Maps to the Calibration Database"
    ]
   },
   {
@@ -1084,26 +1189,26 @@
    "source": [
     "constant_maps = {\n",
     "    'RelativeGain' : gainmap,\n",
-    "    'CTI' : ctimap}\n",
+    "    'CTI' : ctemap} # Instead of CTI map, one should send the CTE map because this is the one that can correct data\n",
     "\n",
+    "md = None\n",
     "for cname in constant_maps.keys():\n",
     "    metadata = ConstantMetaData()\n",
     "    det = Constants.CCD(DetectorTypes.pnCCD)\n",
     "    const = getattr(det, cname)()\n",
     "    const.data = constant_maps[cname].data\n",
-    "    \n",
     "    metadata.calibration_constant = const\n",
     "    \n",
     "    # setting the operating condition\n",
     "    condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
-    "                                           photon_energy=photon_energy,\n",
     "                                           integration_time=integration_time,\n",
+    "                                           photon_energy=photon_energy,\n",
     "                                           gain_setting=gain,\n",
-    "                                           temperature=fix_temperature,\n",
+    "                                           temperature=fix_temperature_top,\n",
     "                                           pixels_x=pixels_x,\n",
     "                                           pixels_y=pixels_y)\n",
     "\n",
-    "    device = Detectors.PnCCD1\n",
+    "    device = getattr(Detectors, db_module)\n",
     "    metadata.detector_condition = condition\n",
     "    \n",
     "    # Specifying the a version for this constant:\n",
@@ -1116,19 +1221,24 @@
     "    \n",
     "    if db_output:\n",
     "        try:\n",
-    "            metadata.send(cal_db_interface, timeout=cal_db_timeout)\n",
-    "            print(f\"Inject {cname} constants from {metadata.calibration_constant_version.begin_at}\")\n",
-    "        except Exception as e:\n",
-    "            print(f\"Error: {e}\")\n",
+    "            md = send_to_db(device, const, condition, file_loc, \n",
+    "                            cal_db_interface, creation_time=creation_time, timeout=cal_db_timeout)\n",
+    "            print(f\"Inject {cname} constants from {metadata.calibration_constant_version.begin_at}\\n\")\n",
+    "        except Exception as e:    \n",
+    "            if \"has already been take\" in str(e):\n",
+    "                print(f\"WARN: {cname} has already been injected with the same parameter conditions.\\n\")\n",
+    "            else:\n",
+    "                # To prevent having big error message out of the pdf report's page.\n",
+    "                print(\"\\n\".join(textwrap.wrap(str(e), 100)))\n",
     "        \n",
     "    if local_output:\n",
-    "        save_const_to_h5(metadata, out_folder)\n",
+    "        md = save_const_to_h5(device, const, condition, const.data, file_loc, creation_time, out_folder)\n",
     "        print(f\"Calibration constant {cname} is stored to {out_folder}.\")\n",
     "        \n",
     "print(\"\\nGenerated constants with conditions:\\n\")\n",
     "print(f\"• bias_voltage: {bias_voltage}\\n• photon_energy: {photon_energy}\\n\"\n",
-    "      f\"• integration_time: {integration_time}\\n• gain_setting: {gain}\\n\"\n",
-    "      f\"• temperature: {fix_temperature}\\n• creation_time: {creation_time}\\n\")"
+    "      f\"• top_temperature: {fix_temperature_top}\\n• top_temperature: {integration_time}\\n\"\n",
+    "      f\"• gain_setting: {gain}\\n• creation_time: {creation_time}\\n\")"
    ]
   },
   {
diff --git a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
index 094bbec2d..420047576 100644
--- a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
+++ b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
@@ -6,9 +6,9 @@
    "source": [
     "# pnCCD Data Correction #\n",
     "\n",
-    "Authors: DET Group, Modified by Kiana Setoodehnia on March 2020 - Version 2.0\n",
+    "Authors: DET Group, Modified by Kiana Setoodehnia on December 2020 - Version 4.0\n",
     "\n",
-    "The following notebook provides offset, relative gain, common mode, split events, and pattern classification corrections of images acquired with the pnCCD. This notebook *does not* yet correct for charge transfer inefficiency."
+    "The following notebook provides offset, common mode, relative gain, split events and pattern classification corrections of images acquired with the pnCCD. This notebook *does not* yet correct for charge transfer inefficiency."
    ]
   },
   {
@@ -22,9 +22,9 @@
    },
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/SQS/202022/p002720/raw\" # input folder\n",
-    "out_folder = '/gpfs/exfel/data/scratch/setoodeh' # output folder\n",
-    "run = 53 # which run to read data from\n",
+    "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\" # input folder\n",
+    "out_folder = '/gpfs/exfel/data/scratch/setoodeh/Test' # output folder\n",
+    "run = 347 # which run to read data from\n",
     "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "\n",
     "db_module = \"pnCCD_M205_M206\"\n",
@@ -43,22 +43,22 @@
     "chunk_size_idim = 1 # H5 chunking size of output data\n",
     "cluster_profile = \"noDB\" # ipcluster profile to use\n",
     "\n",
-    "cpuCores = 8\n",
+    "cpuCores = 40\n",
     "commonModeBlockSize = [512, 512] # size of the detector in pixels for common mode calculations\n",
     "commonModeAxis = 0 # axis along which common mode will be calculated, 0 = row, and 1 = column \n",
-    "split_evt_primary_threshold = 5. # primary threshold for split event classification in terms of n sigma noise\n",
+    "split_evt_primary_threshold = 4. # primary threshold for split event classification in terms of n sigma noise\n",
     "split_evt_secondary_threshold = 3. # secondary threshold for split event classification in terms of n sigma noise\n",
-    "split_evt_mip_threshold = 1000. # MIP threshold for event classification\n",
     "sequences_per_node = 1\n",
     "limit_images = 0\n",
     "seq_num = 0  # sequence number for which the last plot at the end of the notebook is plotted\n",
     "\n",
     "# pnCCD parameters:\n",
-    "fix_temperature = 0. # fix temperature in K, set to 0. to use value from slow data.\n",
-    "gain = 0. # the detector's gain setting, It is later read from file and this value is overwritten\n",
+    "fix_temperature_top = 0. # fix temperature for top sensor in K, set to 0. to use value from slow data.\n",
+    "fix_temperature_bot = 0. # fix temperature for bottom senspr in K, set to 0. to use value from slow data.\n",
+    "gain = 0.1 # the detector's gain setting, It is later read from file and this value is overwritten\n",
     "bias_voltage = 0. # the detector's bias voltage. set to 0. to use value from slow data.\n",
-    "integration_time = 70\n",
-    "photon_energy = 1.5 # Al fluorescence in keV\n",
+    "integration_time = 70  # detector's integration time\n",
+    "photon_energy = 1.6 # Al fluorescence in keV\n",
     "\n",
     "cal_db_interface = \"tcp://max-exfl016:8015\" # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
@@ -68,7 +68,9 @@
     "common_mode = True # Apply common mode correction\n",
     "relgain = True # Apply relative gain correction\n",
     "cti = False # Apply charge transfer inefficiency correction (not implemented, yet)\n",
-    "do_pattern_classification = False # classify split events"
+    "do_pattern_classification = True # classify split events\n",
+    "\n",
+    "saturated_threshold = 32000. # full well capacity in ADU"
    ]
   },
   {
@@ -101,22 +103,32 @@
    },
    "outputs": [],
    "source": [
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "import h5py\n",
-    "import time\n",
     "import copy\n",
-    "import os\n",
-    "import traceback\n",
-    "import glob\n",
     "import datetime\n",
     "from datetime import timedelta\n",
-    "from prettytable import PrettyTable\n",
-    "import numpy as np\n",
-    "import matplotlib.pyplot as plt\n",
-    "from iminuit import Minuit\n",
+    "import glob\n",
+    "import os\n",
+    "import time\n",
+    "import traceback\n",
+    "from typing import Tuple\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "import h5py\n",
     "from IPython.display import display, Markdown\n",
+    "from iminuit import Minuit\n",
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline\n",
+    "import numpy as np\n",
+    "from prettytable import PrettyTable\n",
     "\n",
+    "from cal_tools.pnccdlib import extract_slow_data\n",
+    "from cal_tools.tools import (get_constant_from_db_and_time,\n",
+    "                             get_dir_creation_date,\n",
+    "                             get_random_db_interface)\n",
+    "from iCalibrationDB import (Conditions, ConstantMetaData,\n",
+    "                            Constants, Detectors, Versions)\n",
+    "from iCalibrationDB.detectors import DetectorTypes\n",
     "import XFELDetAna.xfelprofiler as xprof\n",
     "profiler = xprof.Profiler()\n",
     "profiler.disable()\n",
@@ -126,13 +138,7 @@
     "from XFELDetAna import xfelpyanatools as xana\n",
     "from XFELDetAna.plotting.util import prettyPlotting\n",
     "prettyPlotting=True\n",
-    "from XFELDetAna.xfelreaders import ChunkReader\n",
-    "from XFELDetAna.detectors.fastccd import readerh5 as fastccdreaderh5\n",
-    "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
-    "from iCalibrationDB.detectors import DetectorTypes\n",
-    "from cal_tools.tools import get_dir_creation_date, get_random_db_interface, get_constant_from_db_and_time\n",
     "\n",
-    "%matplotlib inline\n",
     "\n",
     "if sequences[0] == -1:\n",
     "    sequences = None"
@@ -153,6 +159,7 @@
     "print(f'Calibration database interface selected: {cal_db_interface}')\n",
     "\n",
     "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
+    "file_loc =f'Proposal: {proposal}, Run: {run}'\n",
     "print(f'Proposal: {proposal}, Run: {run}')\n",
     "\n",
     "# Paths to the data:\n",
@@ -170,7 +177,7 @@
     "        print(f\"creation_time value error: {e}.\" \n",
     "               \"Use same format as YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00/n\")\n",
     "        creation_time = None\n",
-    "        print(\"Given creation time wont be used.\")\n",
+    "        print(\"Given creation time will not be used.\")\n",
     "else:\n",
     "    creation_time = None\n",
     "\n",
@@ -220,20 +227,12 @@
     "    ctrl_fname = os.path.join(ped_dir, path_template.format(run, karabo_da_control)).format(sequences[0])\n",
     "    ctrl_path = h5path_ctrl.format(karabo_id)\n",
     "    mdl_ctrl_path = f\"/CONTROL/{karabo_id}/MDL/\"\n",
-    "    try:\n",
-    "        with h5py.File(ctrl_fname, \"r\") as f:\n",
-    "            if bias_voltage == 0.:\n",
-    "                bias_voltage = abs(f[os.path.join(mdl_ctrl_path, \"DAQ_MPOD/u0voltage/value\")][0])\n",
-    "            gain = f[os.path.join(mdl_ctrl_path, \"DAQ_GAIN/pNCCDGain/value\")][0]\n",
-    "            if fix_temperature == 0.:\n",
-    "                fix_temperature = f[os.path.join(ctrl_path, \"inputA/krdg/value\")][0]\n",
-    "    except KeyError:\n",
-    "        print(\"Error !!! during extracting slow data\")\n",
-    "        traceback.print_exc(limit=1)\n",
-    "        print(\"Control file name:\", ctrl_fname)\n",
-    "        print(\"bias voltage control h5path:\", os.path.join(mdl_ctrl_path, \"DAQ_MPOD/u0voltage/value\"))\n",
-    "        print(\"gain control h5path:\", os.path.join(mdl_ctrl_path, \"DAQ_GAIN/pNCCDGain/value\"))\n",
-    "        print(\"fix_temperature control h5path:\", os.path.join(ctrl_path, \"inputA/krdg/value\"))"
+    "\n",
+    "    (bias_voltage, gain,\n",
+    "     fix_temperature_top,\n",
+    "     fix_temperature_bot) = extract_slow_data(karabo_id, karabo_da_control, ctrl_fname, ctrl_path,\n",
+    "                                              mdl_ctrl_path, bias_voltage, gain,\n",
+    "                                              fix_temperature_top, fix_temperature_bot)"
    ]
   },
   {
@@ -245,10 +244,103 @@
     "# Printing the Parameters Read from the Data File:\n",
     "\n",
     "display(Markdown('### Detector Parameters'))\n",
-    "print(\"Bias voltage is {} V.\".format(bias_voltage))\n",
-    "print(\"Detector gain is set to {}.\".format(gain))\n",
-    "print(\"Detector integration time is set to {} ms\".format(integration_time))\n",
-    "print(f\"Using a fixed temperature of {fix_temperature} K\")"
+    "print(f\"Bias voltage is {bias_voltage:0.1f} V.\")\n",
+    "print(f\"Detector gain is set to 1/{int(gain)}.\")\n",
+    "print(f\"Detector integration time is set to {integration_time} ms\")\n",
+    "print(f\"Top pnCCD sensor is at temperature of {fix_temperature_top:0.2f} K\")\n",
+    "print(f\"Bottom pnCCD sensor is at temperature of {fix_temperature_bot:0.2f} K\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "valid_gains = {\n",
+    "    \"a\" : 1.0,\n",
+    "    \"b\" : 4.0,\n",
+    "    \"c\" : 16.0,\n",
+    "    \"d\" : 64.0,\n",
+    "    \"e\" : 256.0,\n",
+    "    \"f\" : 340.0,\n",
+    "    \"g\" : 512.0\n",
+    "}\n",
+    "\n",
+    "gain_k = [k for k, v in valid_gains.items() if v == gain][0]\n",
+    "if gain_k == 'a':\n",
+    "    split_evt_mip_threshold = 1000. # MIP threshold in ADU for event classification (10 times average noise)\n",
+    "\n",
+    "    # Each xcal.HistogramCalculator requires a total number of bins and a binning range. We define these \n",
+    "    # using a dictionary:\n",
+    "\n",
+    "    # For all xcal histograms:\n",
+    "\n",
+    "    Hist_Bin_Dict = {\n",
+    "        \"bins\": 35000, # number of bins \n",
+    "        \"bin_range\": [0, 35000]\n",
+    "    }\n",
+    "\n",
+    "    # For the numpy histograms on the last cell of the notebook:\n",
+    "    Event_Bin_Dict = {\n",
+    "        \"event_bins\": 1000, # number of bins \n",
+    "        \"b_range\": [0, 35000] # bin range \n",
+    "    }\n",
+    "elif gain_k == 'b':\n",
+    "    split_evt_mip_threshold = 270. # 10 times the average noise\n",
+    "    Hist_Bin_Dict = {\n",
+    "        \"bins\": 10000, \n",
+    "        \"bin_range\": [0, 10000]\n",
+    "    }\n",
+    "    Event_Bin_Dict = {\n",
+    "        \"event_bins\": 1000, \n",
+    "        \"b_range\": [0, 10000] \n",
+    "    }\n",
+    "elif gain_k == 'c':\n",
+    "    split_evt_mip_threshold = 110. # 10 times the average noise\n",
+    "    Hist_Bin_Dict = {\n",
+    "        \"bins\": 3000,  \n",
+    "        \"bin_range\": [0, 3000]\n",
+    "    }\n",
+    "    Event_Bin_Dict = {\n",
+    "        \"event_bins\": 1000, \n",
+    "        \"b_range\": [0, 3000] \n",
+    "    }\n",
+    "elif gain_k == 'd':\n",
+    "    split_evt_mip_threshold = 90. # 10 times the average noise\n",
+    "    Hist_Bin_Dict = {\n",
+    "        \"bins\": 1000, \n",
+    "        \"bin_range\": [0, 1000]\n",
+    "    }\n",
+    "    Event_Bin_Dict = {\n",
+    "        \"event_bins\": 1000, \n",
+    "        \"b_range\": [0, 1000] \n",
+    "    }\n",
+    "elif gain_k == 'e':\n",
+    "    split_evt_mip_threshold = 90. # 10 times the average noise\n",
+    "    Hist_Bin_Dict = {\n",
+    "        \"bins\": 500,\n",
+    "        \"bin_range\": [0, 500]\n",
+    "    }\n",
+    "    Event_Bin_Dict = {\n",
+    "        \"event_bins\": 500, \n",
+    "        \"b_range\": [0, 500] \n",
+    "    }\n",
+    "else:\n",
+    "    split_evt_mip_threshold = 90. # 10 times the average noise\n",
+    "    Hist_Bin_Dict = {\n",
+    "        \"bins\": 220, \n",
+    "        \"bin_range\": [0, 220]\n",
+    "    }\n",
+    "    Event_Bin_Dict = {\n",
+    "        \"event_bins\": 220, \n",
+    "        \"b_range\": [0, 220] \n",
+    "    }\n",
+    "    \n",
+    "bins = Hist_Bin_Dict[\"bins\"]\n",
+    "bin_range = Hist_Bin_Dict[\"bin_range\"]\n",
+    "event_bins = Event_Bin_Dict[\"event_bins\"]\n",
+    "b_range = Event_Bin_Dict[\"b_range\"]"
    ]
   },
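+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The chain of conditionals above could equivalently be driven by a single lookup table. A minimal, behavior-preserving sketch (`GAIN_PARAMS` and `upper_edge` are hypothetical names, not used elsewhere in this notebook):\n",
+    "\n",
+    "```python\n",
+    "# gain letter -> (MIP threshold in ADU, xcal histogram bins, numpy event bins, upper bin edge in ADU)\n",
+    "GAIN_PARAMS = {\n",
+    "    'a': (1000., 35000, 1000, 35000),\n",
+    "    'b': (270., 10000, 1000, 10000),\n",
+    "    'c': (110., 3000, 1000, 3000),\n",
+    "    'd': (90., 1000, 1000, 1000),\n",
+    "    'e': (90., 500, 500, 500),\n",
+    "    'f': (90., 220, 220, 220),\n",
+    "    'g': (90., 220, 220, 220),\n",
+    "}\n",
+    "\n",
+    "split_evt_mip_threshold, bins, event_bins, upper_edge = GAIN_PARAMS[gain_k]\n",
+    "bin_range = [0, bins]  # the xcal histograms use 1 ADU per bin\n",
+    "b_range = [0, upper_edge]\n",
+    "```"
+   ]
+  },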
   {
@@ -294,61 +386,7 @@
    "outputs": [],
    "source": [
     "# Output Folder Creation:\n",
-    "os.makedirs(out_folder, exist_ok=True)\n",
-    "if not overwrite:\n",
-    "    raise AttributeError(\"Output path exists! Exiting\")    "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Each xcal.HistogramCalculator requires a total number of bins and a binning range. We define these using a \n",
-    "# dictionary:\n",
-    "\n",
-    "# For all xcal histograms:\n",
-    "if gain == 1:\n",
-    "    Hist_Bin_Dict = {\n",
-    "        \"bins\": 35000, # number of bins \n",
-    "        \"bin_range\": [0, 35000]\n",
-    "    }\n",
-    "\n",
-    "    # For the numpy histograms on the last cell of the notebook:\n",
-    "    Event_Bin_Dict = {\n",
-    "        \"event_bins\": 1000, # number of bins \n",
-    "        \"b_range\": [0, 35000] # bin range \n",
-    "    }\n",
-    "    \n",
-    "#TODO: make it more adaptive for more than only 2 gains [below was for gain==64 only]\n",
-    "else:\n",
-    "    # For all xcal histograms:\n",
-    "    Hist_Bin_Dict = {\n",
-    "        \"bins\": 25000, # number of bins \n",
-    "        \"bin_range\": [0, 25000] \n",
-    "    }\n",
-    "    # For the numpy histograms on the last cell of the notebook:\n",
-    "    Event_Bin_Dict = {\n",
-    "        \"event_bins\": 1000, # number of bins \n",
-    "        \"b_range\": [0, 5000] # bin range \n",
-    "    }\n",
-    "    \n",
-    "bins = Hist_Bin_Dict[\"bins\"]\n",
-    "bin_range = Hist_Bin_Dict[\"bin_range\"]\n",
-    "event_bins = Event_Bin_Dict[\"event_bins\"]\n",
-    "b_range = Event_Bin_Dict[\"b_range\"]\n",
-    "\n",
-    "# On the singles spectrum (uploaded in the middle of this notebook), the ADU values correspoding to the boundaries\n",
-    "# of the first peak region are used as cti_limit_low and cti_limit_high:\n",
-    "\n",
-    "if gain == 1:\n",
-    "    cti_limit_low = 1000 # lower limit of cti\n",
-    "    cti_limit_high = 100000 # higher limit of cti\n",
-    "#TODO: make it more adaptive for more than only 2 gains [below was for gain==64 only\n",
-    "else:\n",
-    "    cti_limit_low = 50\n",
-    "    cti_limit_high = 2000"
+    "os.makedirs(out_folder, exist_ok=True)"
    ]
   },
   {
@@ -364,7 +402,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def get_dark(db_parms, bias_voltage, integration_time, gain, fix_temperature, creation_time, run):\n",
+    "def get_dark(db_parms: Tuple[str, int], bias_voltage: float, gain: float, integration_time: float,\n",
+    "             fix_temperature_top: float, creation_time: str, run: str) -> dict :\n",
     "# This function is to retrieve the dark constants associated with the run of interest:\n",
     "\n",
     "    cal_db_interface, cal_db_timeout = db_parms\n",
@@ -383,11 +422,11 @@
     "        when = {}\n",
     "        \n",
     "        condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                            integration_time=integration_time,\n",
-    "                            gain_setting=gain,\n",
-    "                            temperature=fix_temperature,\n",
-    "                            pixels_x=pixels_x,\n",
-    "                            pixels_y=pixels_y)\n",
+    "                                        integration_time=integration_time,\n",
+    "                                        gain_setting=gain,\n",
+    "                                        temperature=fix_temperature_top,\n",
+    "                                        pixels_x=pixels_x,\n",
+    "                                        pixels_y=pixels_y)\n",
     "        \n",
     "        for const in constants.keys():\n",
     "            constants[const], when[const] = \\\n",
@@ -421,22 +460,24 @@
     "\n",
     "db_parms = cal_db_interface, cal_db_timeout\n",
     "\n",
-    "constants = get_dark(db_parms, bias_voltage, integration_time, gain, fix_temperature, creation_time, run)\n",
+    "constants = get_dark(db_parms, bias_voltage, gain, integration_time,\n",
+    "                     fix_temperature_top, creation_time, run)\n",
     "\n",
-    "fig = xana.heatmapPlot(constants[\"Offset\"][:,:,0], x_label='Columns', y_label='Rows', lut_label='Pedestal (ADU)', aspect=1, \n",
+    "fig = xana.heatmapPlot(constants[\"Offset\"][:,:,0], x_label='Columns', y_label='Rows', lut_label='Offset (ADU)', \n",
+    "                       aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmax=16000, \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                       title = 'Dark Offset Map')\n",
     "\n",
     "fig = xana.heatmapPlot(constants[\"Noise\"][:,:,0], x_label='Columns', y_label='Rows', \n",
-    "                       lut_label='Common Mode Corrected Noise (ADU)', \n",
-    "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), # vmin=-50, vmax=70000, \n",
+    "                       lut_label='Corrected Noise (ADU)', \n",
+    "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x),  \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                       title = 'Dark Noise Map')\n",
     "\n",
     "fig = xana.heatmapPlot(np.log2(constants[\"BadPixelsDark\"][:,:,0]), x_label='Columns', y_label='Rows', \n",
     "                       lut_label='Bad Pixel Value (ADU)', \n",
-    "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), # vmin=-50, vmax=70000, \n",
+    "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                       title = 'Dark Bad Pixels Map')"
    ]
@@ -448,7 +489,7 @@
    "outputs": [],
    "source": [
     "if corr_bools.get('relgain'):\n",
-    "    display(Markdown('We will now retrieve the relative gain map from the calibration database'))\n",
+    "    display(Markdown('We will now retrieve the relative gain map from the calibration database.'))\n",
     "    metadata = ConstantMetaData()\n",
     "    relgain = Constants.CCD(DetectorTypes.pnCCD).RelativeGain()\n",
     "    metadata.calibration_constant = relgain\n",
@@ -456,7 +497,7 @@
     "    condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
     "                                           integration_time=integration_time,\n",
     "                                           gain_setting=gain,\n",
-    "                                           temperature=fix_temperature,\n",
+    "                                           temperature=fix_temperature_top,\n",
     "                                           pixels_x=pixels_x,\n",
     "                                           pixels_y=pixels_y, \n",
     "                                           photon_energy=photon_energy)\n",
@@ -472,12 +513,13 @@
     "    print(f\"Retrieved RelativeGain constant with creation time: {relgain_time}\")\n",
     "\n",
     "    display(Markdown('### Relative Gain Map Retrieval'))\n",
-    "    fig = xana.heatmapPlot(constants[\"RelativeGain\"], figsize=(8, 8), x_label='Columns', y_label='Rows', lut_label='Relative Gain', \n",
+    "    fig = xana.heatmapPlot(constants[\"RelativeGain\"], figsize=(8, 8), x_label='Columns', y_label='Rows', \n",
+    "                           lut_label='Relative Gain', \n",
     "                           aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0.8, vmax=1.2, \n",
-    "                           panel_x_label='Along Rows', panel_y_label='Along Columns', \n",
+    "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                           panel_top_low_lim = 0.5, panel_top_high_lim = 1.5, panel_side_low_lim = 0.5, \n",
     "                           panel_side_high_lim = 1.5, \n",
-    "                           title = f'1st Relative Gain Map for pnCCD (Gain = {gain})')"
+    "                           title = f'Relative Gain Map for pnCCD (Gain = 1/{int(gain)})')"
    ]
   },
   {
@@ -494,7 +536,7 @@
     "                                             commonModeBlockSize,\n",
     "                                             commonModeAxis,\n",
     "                                             parallel=False, dType=np.float32, stride=1,\n",
-    "                                             noiseMap=constants[\"Noise\"].astype(np.float32), minFrac=0)\n",
+    "                                             noiseMap=constants[\"Noise\"].astype(np.float32), minFrac=0.25)\n",
     "    cmCorrection.debug()\n",
     "\n",
     "if corr_bools.get('pattern_class'):\n",
@@ -574,7 +616,7 @@
     "    histCalCommonModeCor.debug()\n",
     "    \n",
     "if corr_bools.get('pattern_class'):\n",
-    "# Will contain split events pattern data:\n",
+    "    # Will contain split events pattern data:\n",
     "    histCalPcorr = xcal.HistogramCalculator(sensorSize, \n",
     "                                            bins=bins, \n",
     "                                            range=bin_range,\n",
@@ -582,7 +624,7 @@
     "                                            cores=cpuCores,\n",
     "                                            blockSize=blockSize)\n",
     "    histCalPcorr.debug()\n",
-    "# Will contain singles events data:\n",
+    "    # Will contain singles events data:\n",
     "    histCalPcorrS = xcal.HistogramCalculator(sensorSize, \n",
     "                                             bins=bins, \n",
     "                                             range=bin_range,\n",
@@ -619,7 +661,7 @@
    },
    "outputs": [],
    "source": [
-    "def copy_and_sanitize_non_cal_data(infile, outfile, h5base):\n",
+    "def copy_and_sanitize_non_cal_data(infile: str, outfile: str, h5base: str):\n",
     "    '''This function reads the .h5 data and writes the corrected .h5 data.'''\n",
     "    if h5base.startswith(\"/\"):\n",
     "        h5base = h5base[1:]\n",
@@ -645,21 +687,17 @@
    "outputs": [],
    "source": [
     "# Data corrections and event classifications happen here. Also, the corrected data are written to datasets:\n",
-    "uncor_mean_im = None\n",
-    "uncor_single_im = None\n",
-    "offset_mean_im = None\n",
-    "offset_single_im = None\n",
-    "cm_mean_im = None\n",
-    "cm_single_im = None\n",
-    "gain_mean_im = None\n",
-    "mean_im_cc = None\n",
-    "single_im_cc = None\n",
+    "\n",
+    "# Initialize 5 numpy array of zeros with the shape of (1024, 1024, 0)\n",
+    "uncor, off_cor, g_cor, cm_cor, final_cor = np.zeros((5, 1024, 1024, 0), dtype=np.float32)\n",
     "\n",
     "offsetMap = np.squeeze(constants[\"Offset\"])\n",
     "noiseMap = np.squeeze(constants[\"Noise\"])\n",
     "badPixelMap = np.squeeze(constants[\"BadPixelsDark\"])\n",
+    "\n",
     "if corr_bools.get('relgain'):\n",
     "    relGain = constants[\"RelativeGain\"]\n",
+    "\n",
     "for k, f in enumerate(file_list):\n",
     "    with h5py.File(f, 'r', driver='core') as infile:\n",
     "        out_fileb = \"{}/{}\".format(out_folder, f.split(\"/\")[-1])\n",
@@ -670,32 +708,31 @@
     "        \n",
     "        try:\n",
     "            with h5py.File(out_file, \"w\") as ofile:\n",
-    "            \n",
     "                copy_and_sanitize_non_cal_data(infile, ofile, h5path)\n",
     "                data = infile[h5path+\"/image\"][()]\n",
+    "                # Getting rid of empty frames:\n",
     "                nzidx = np.count_nonzero(data, axis=(1, 2))\n",
     "                data = data[nzidx != 0, ...]\n",
-    "                            \n",
+    "                \n",
+    "                # If you want to analyze only a certain number of frames instead of all available good frames: \n",
     "                if limit_images > 0:\n",
     "                    data = data[:limit_images,...]\n",
+    "                    \n",
+    "                # used for array shapes in the corrected data sets that we create and save in this loop:\n",
     "                oshape = data.shape\n",
+    "                \n",
     "                data = np.moveaxis(data, 0, 2)\n",
-    "                            \n",
+    "                \n",
+    "                # data set to save offset corrected images:\n",
     "                ddset = ofile.create_dataset(h5path+\"/pixels\",\n",
     "                                             oshape,\n",
     "                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
     "                                             dtype=np.float32)\n",
-    "                \n",
+    "                # data set to create bad pixels:\n",
     "                ddsetm = ofile.create_dataset(h5path+\"/mask\",\n",
     "                                             oshape,\n",
     "                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
     "                                             dtype=np.uint32, compression=\"gzip\")\n",
-    "\n",
-    "                if corr_bools.get('relgain'):\n",
-    "                    ddsetg = ofile.create_dataset(h5path+\"/gain\",\n",
-    "                                                  oshape,\n",
-    "                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
-    "                                                  dtype=np.float32, compression=\"gzip\")\n",
     "                \n",
     "                data = data.astype(np.float32) \n",
     "                \n",
@@ -706,63 +743,71 @@
     "                if corr_bools.get('relgain'):\n",
     "                    rg = np.repeat(relGain[:,:,None], data.shape[2], axis=2) # rg = relative gain\n",
     "                \n",
-    "                # uncor_mean_im = averaged over all non-corrected images (in the first sequence only)\n",
-    "                if uncor_mean_im is None:\n",
-    "                        uncor_mean_im = np.nanmean(data, axis=2)  \n",
-    "                        uncor_single_im = data[...,0] # The non-corrected image corresponding to the first frame\n",
+    "                # non-corrected images for first sequence only:\n",
+    "                if k == 0:\n",
+    "                    uncor = np.append(uncor, data, axis=2)\n",
     "                        \n",
     "                histCalRaw.fill(data) # filling histogram with raw uncorrected data\n",
     "                \n",
-    "                # masking data for bad pixels and equating the values to np.nan so that the pattern classifier \n",
-    "                # ignores them:\n",
+    "                # equating bad pixels' values to np.nan so that the pattern classifier ignores them:\n",
     "                data[bpix != 0] = np.nan\n",
     "            \n",
-    "                data -= offset # offset correction                    \n",
+    "                data -= offset # offset correction  \n",
+    "                \n",
+    "                # Offset corrected images for first sequence only:\n",
+    "                if k == 0:\n",
+    "                    off_cor = np.append(off_cor, data, axis=2)\n",
     "                histCalOffsetCor.fill(data) # filling histogram with offset corrected data\n",
     "\n",
     "                ddset[...] = np.moveaxis(data, 2, 0)\n",
     "                ddsetm[...] = np.moveaxis(bpix, 2, 0)\n",
     "                ofile.flush()\n",
     "\n",
-    "                # mean_image = averaged over all offset corrected images (in the first sequence only)\n",
-    "                if offset_mean_im is None:\n",
-    "                        offset_mean_im = np.nanmean(data, axis=2)  \n",
-    "                        offset_single_im = data[...,0] # The offset corrected image corresponding to the first frame \n",
-    "                        \n",
-    "                # cm: common mode, c: classifications, p: even patterns\n",
+    "                # cm: common mode\n",
     "                if corr_bools.get('common_mode'):\n",
+    "                    \n",
+    "                    # data set to save common mode corrected images:\n",
     "                    ddsetcm = ofile.create_dataset(h5path+\"/pixels_cm\",\n",
     "                                                   oshape,\n",
     "                                                   chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
     "                                                   dtype=np.float32)\n",
     "                    \n",
+    "                    # common mode correction:\n",
     "                    data = cmCorrection.correct(data.astype(np.float32),  # common mode correction\n",
-    "                                                    cellTable=np.zeros(data.shape[2], np.int32)) \n",
+    "                                                cellTable=np.zeros(data.shape[2], np.int32)) \n",
+    "                    \n",
+    "                    # discarding events caused by saturated pixels:\n",
+    "                    # we equate these values to np.nan so that the pattern classifier ignores them:\n",
+    "                    data[data >= saturated_threshold] = np.nan \n",
     "                    histCalCommonModeCor.fill(data) # filling histogram with common mode corrected data\n",
-    "                    # common mode corrected images:\n",
-    "                    if cm_mean_im is None:\n",
-    "                        cm_mean_im = np.nanmean(data, axis=2)  \n",
-    "                        cm_single_im = data[...,0] # The common mode corrected image corresponding to the first frame \n",
+    "                    # common mode corrected images for first sequence only:\n",
+    "                    if k == 0:\n",
+    "                        cm_cor = np.append(cm_cor, data, axis=2)\n",
+    "                    \n",
     "                    ddsetcm[...] = np.moveaxis(data, 2, 0)\n",
     "                    \n",
     "                if corr_bools.get('relgain'):\n",
+    "                    # data set to save gain corrected images:\n",
+    "                    ddsetg = ofile.create_dataset(h5path+\"/gain\",\n",
+    "                                                  oshape,\n",
+    "                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
+    "                                                  dtype=np.float32, compression=\"gzip\")\n",
+    "                \n",
     "                    data /= rg  # relative gain correction \n",
     "                    histCalGainCor.fill(data) # filling histogram with gain corrected data\n",
+    "                    # gain corrected images for first sequence only:\n",
+    "                    if k == 0:\n",
+    "                        g_cor = np.append(g_cor, data, axis=2)\n",
     "                    ddsetg[...] = np.moveaxis(rg, 2, 0).astype(np.float32)\n",
     "\n",
-    "                    # gain_mean_image = averaged over gain corrected images (in the first sequence only)\n",
-    "                    if gain_mean_im is None:\n",
-    "                            gain_mean_im = np.nanmean(data, axis=2)  \n",
-    "                            gain_single_im = data[...,0] # The gain corrected image corresponding to the first frame\n",
-    "\n",
     "                if corr_bools.get('pattern_class'):\n",
-    "\n",
-    "\n",
+    "                    # data set to save split event corrected images:\n",
+    "                    # c: classifications, p: even patterns\n",
     "                    ddsetc = ofile.create_dataset(h5path+\"/pixels_classified\",\n",
     "                                                  oshape,\n",
     "                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
     "                                                  dtype=np.float32, compression=\"gzip\")\n",
-    "\n",
+    "                    # data set to save different valid patterns:\n",
     "                    ddsetp = ofile.create_dataset(h5path+\"/patterns\",\n",
     "                                                 oshape,\n",
     "                                                 chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
@@ -775,8 +820,9 @@
     "                    # Dividing the data into left and right hemispheres:\n",
     "                    dataLH = data[:, :pixels_x//2, :]\n",
     "                    dataRH = data[:, pixels_x//2:, :]\n",
-    "\n",
-    "                    dataLH, patternsLH = patternClassifierLH.classify(dataLH) # pattern classification on corrected data\n",
+    "                    \n",
+    "                    # pattern classification on corrected data\n",
+    "                    dataLH, patternsLH = patternClassifierLH.classify(dataLH) \n",
     "                    dataRH, patternsRH = patternClassifierRH.classify(dataRH)\n",
     "\n",
     "                    data[:, :pixels_x//2, :] = dataLH\n",
@@ -794,11 +840,11 @@
     "                    data[patterns != 100] = np.nan # Discard doubles, triples, quadruple, clusters, first singles\n",
     "                    histCalPcorrS.fill(data) # filling histogram with singles events data\n",
     "                     \n",
-    "                    # mean_im_cc = averaged over all pattern classified corrected images \n",
-    "                    # (in the first sequence only)\n",
-    "                    if mean_im_cc is None:\n",
-    "                        mean_im_cc = np.nanmean(data, axis=2) \n",
-    "                        single_im_cc = data[...,0] # The final corrected image corresponding to the first frame\n",
+    "                    # split event corrected images for first sequence only (also these events are only \n",
+    "                    # singles events):\n",
+    "                    if k == 0:\n",
+    "                        final_cor = np.append(final_cor, data, axis=2)\n",
+    "                    \n",
     "        except Exception as e:\n",
     "            print(f\"Couldn't calibrate data in {f}: {e}\\n\")\n",
     "\n",
@@ -847,28 +893,12 @@
     "if corr_bools.get('relgain'):\n",
     "    np.savez(os.path.join(out_folder, 'Gain_Corrected_Events.npz'), gain_cor_HistMids, gain_cor_HistVals)\n",
     "if corr_bools.get('pattern_class'):\n",
-    "    np.savez(os.path.join(out_folder, 'Split_Events.npz'), split_HistMids, split_HistVals)\n",
+    "    np.savez(os.path.join(out_folder, 'Split_Events_Corrected_Events.npz'), split_HistMids, split_HistVals)\n",
     "    np.savez(os.path.join(out_folder, 'Singles_Events.npz'), singles_HistMids, singles_HistVals)\n",
     "\n",
     "print(\"Various spectra are saved to disk in the form of histograms. Please check {}\".format(out_folder))"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# depending on the gain, the ranges of data are different, so we set a few parameters so that the plots always look\n",
-    "# good.\n",
-    "\n",
-    "if gain == 1:\n",
-    "    x_range = (0, 35000)\n",
-    "#TODO: make it more adaptive for more than only 2 gains [below was for gain==64 only\n",
-    "else:\n",
-    "    x_range = (0, 2000)"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -904,7 +934,7 @@
     "                   'label': 'Common Mode Corrected'})\n",
     "\n",
     "if corr_bools.get('relgain'):\n",
-    "    xrange = (cti_limit_low, cti_limit_high)\n",
+    "    xrange = bin_range\n",
     "    figure.append({'x': gain_cor_HistMids,\n",
     "                   'y': gain_cor_HistVals,\n",
     "                   'y_err': np.sqrt(gain_cor_HistVals[:]),\n",
@@ -912,9 +942,6 @@
     "                   'errorstyle': 'bars',\n",
     "                   'errorcoarsing': 2,\n",
     "                   'label': 'Gain Corrected'})\n",
-    "else:\n",
-    "    xrange = x_range\n",
-    "\n",
     "    \n",
     "if corr_bools.get('pattern_class'):    \n",
     "    figure.extend([{'x': split_HistMids,\n",
@@ -934,7 +961,7 @@
     "                   'label': 'Singles Events'\n",
     "                  }])\n",
     "fig = xana.simplePlot(figure, aspect=1, x_label='ADU', y_label='Number of Occurrences', figsize='2col',\n",
-    "                      y_log=True, x_range=x_range, title = '1 ADU per bin is used.',\n",
+    "                      y_log=True, x_range=bin_range, title = '1 ADU per bin is used.',\n",
     "                      legend='top-right-frame-1col')"
    ]
   },
@@ -1116,7 +1143,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Mean Images of the First Sequence ###"
+    "### Various Images Averaged Over All Frames of Only the First Sequence ###"
    ]
   },
   {
@@ -1131,31 +1158,43 @@
    },
    "outputs": [],
    "source": [
+    "uncor_mean_im = np.nanmean(uncor, axis=2)\n",
+    "offset_mean_im = np.nanmean(off_cor, axis=2)\n",
+    "\n",
+    "if corr_bools.get('common_mode'):\n",
+    "    cm_mean_im = np.nanmean(cm_cor, axis=2)\n",
+    "if corr_bools.get('relgain'):\n",
+    "    gain_mean_im = np.nanmean(g_cor, axis=2)\n",
+    "if corr_bools.get('pattern_class'):\n",
+    "    mean_im_cc = np.nanmean(final_cor, axis=2)\n",
+    "\n",
     "fig = xana.heatmapPlot(uncor_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x),  \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',\n",
-    "                       title = 'Uncorrected Image Averaged over the First Sequence')\n",
+    "                       title = 'Uncorrected Image Averaged over Frames in the First Sequence')\n",
     "\n",
     "fig = xana.heatmapPlot(offset_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x),  \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',  \n",
-    "                       title = 'Offset Corrected Image Averaged over the First Sequence')\n",
+    "                       title = 'Offset Corrected Image Averaged over Frames in the First Sequence')\n",
+    "\n",
+    "if corr_bools.get('common_mode'):\n",
+    "    fig = xana.heatmapPlot(cm_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
+    "                           x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
+    "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
+    "                           title = 'Common Mode Corrected Image Averaged over Frames in the First Sequence')\n",
+    "\n",
     "if corr_bools.get('relgain'):\n",
     "    fig = xana.heatmapPlot(gain_mean_im, x_label='Columns', y_label='Rows', \n",
-    "                           lut_label='Gain Corrected Signal (ADU)', aspect=1, \n",
+    "                           lut_label='Signal (ADU)', aspect=1, \n",
     "                           x_range=(0, pixels_y), y_range=(0, pixels_x),   \n",
     "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',  \n",
-    "                           title = 'Mean Gain Corrected Image of the First Sequence')\n",
+    "                           title = 'Gain Corrected Image Averaged over Frames in the First Sequence')\n",
     "\n",
-    "if corr_bools.get('pattern_class') and corr_bools.get('common_mode'):\n",
-    "    fig = xana.heatmapPlot(cm_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
-    "                           x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
-    "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
-    "                           title = 'Common Mode Corrected Image Averaged over the First Sequence')\n",
-    "    \n",
+    "if corr_bools.get('pattern_class'):\n",
     "    fig = xana.heatmapPlot(mean_im_cc, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
-    "                           x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
-    "                           title = 'Image after All Corrections Averaged over the First Sequence')"
+    "                           x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0, vmax= 18000,\n",
+    "                           title = 'Image of Single Events Averaged over Frames in the First Sequence')"
    ]
   },
   {
@@ -1177,27 +1216,35 @@
    },
    "outputs": [],
    "source": [
-    "fig = xana.heatmapPlot(uncor_single_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
+    "fig = xana.heatmapPlot(uncor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',  \n",
-    "                       title = 'Uncorrected Image (First Frame of First Sequence)')\n",
+    "                       title = 'Uncorrected Image (First Frame of the First Sequence)')\n",
     "\n",
-    "fig = xana.heatmapPlot(offset_single_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
+    "fig = xana.heatmapPlot(off_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',   \n",
-    "                       title = 'Offset Corrected Image (First Frame of First Sequence)')\n",
+    "                       title = 'Offset Corrected Image (First Frame of the First Sequence)')\n",
     "\n",
-    "if corr_bools.get('pattern_class'):\n",
-    "    if corr_bools.get('common_mode'):\n",
-    "        fig = xana.heatmapPlot(cm_single_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
+    "if corr_bools.get('common_mode'):\n",
+    "        fig = xana.heatmapPlot(cm_cor[:,:,2], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
+    "                               aspect=1, \n",
     "                               x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                               panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
-    "                               title = 'Common Mode Corrected Image (First Frame of First Sequence)')\n",
-    "    \n",
-    "    fig = xana.heatmapPlot(single_im_cc, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
+    "                               title = 'Common Mode Corrected Image (First Frame of the First Sequence)')\n",
+    "        \n",
+    "if corr_bools.get('relgain'):\n",
+    "        fig = xana.heatmapPlot(g_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
+    "                               aspect=1, \n",
+    "                               x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
+    "                               panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
+    "                               title = 'Gain Corrected Image (First Frame of the First Sequence)')\n",
+    "\n",
+    "if corr_bools.get('pattern_class'):    \n",
+    "    fig = xana.heatmapPlot(final_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
     "                           x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',  \n",
-    "                           title = 'Image after All Corrections (First Frame of First Sequence)')"
+    "                           title = 'Image of Single Events (First Frame of the First Sequence)')"
    ]
   },
   {
@@ -1209,20 +1256,20 @@
     "# Resetting the histogram calculators:\n",
     "histCalRaw.reset()\n",
     "histCalOffsetCor.reset()\n",
-    "if corr_bools.get('pattern_class'):\n",
-    "    histCalPcorr.reset()\n",
-    "    histCalPcorrS.reset()\n",
     "if corr_bools.get('common_mode'):\n",
     "    histCalCommonModeCor.reset()\n",
     "if corr_bools.get('relgain'):\n",
-    "    histCalGainCor.reset()"
+    "    histCalGainCor.reset()\n",
+    "if corr_bools.get('pattern_class'):\n",
+    "    histCalPcorr.reset()\n",
+    "    histCalPcorrS.reset()"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Next, the corrected event patterns are read from the patterns/ dataset created previously and are separated into 4 different categories (singles, doubles, triples and quadruples) using the pattern indices. However, this is done only for one sequence, corresponding to the seq_num variable, as an example. \n",
+    "Next, the corrected event patterns are read from the patterns/dataset created previously and are separated into 4 different categories (singles, doubles, triples and quadruples) using the pattern indices. However, this is done only for one sequence, corresponding to the seq_num variable, as an example. \n",
     "\n",
     "Note that the number of bins and the bin range for the following histograms may be different from those presented above (depending on gain) to make the counts more noticible and the peaks more defined.\n",
     "\n",
@@ -1293,15 +1340,14 @@
     "    # The bin-edges array e has one element more than the counts array h, so we\n",
     "    # plot h against e[:-1], i.e. all edges except the last, to match the sizes.\n",
     "\n",
-    "    #TODO adapt the title depending on corrections\n",
-    "    display(Markdown('### Histograms of Offset, Common Mode and Gain Corrected Events for One Sequence Only'))\n",
+    "    display(Markdown('### Histograms of Corrected Events for One Sequence Only'))\n",
     "    fig = plt.figure(figsize=(10, 7))\n",
     "    ax = fig.add_subplot(111)\n",
     "    ax.step(e[:-1], h, color='blue', label='Events Involving Single Pixels Only') \n",
     "    ax.semilogy() # y-axis is log, x-axis is linear\n",
-    "    ax.set_xlabel(\"Energy (ADU) [{} bins per {} ADU.]\".format(event_bins, b_range[1]-b_range[0]))\n",
-    "    ax.set_ylabel(\"Gain Corrected Events (counts) for One Sequence\")\n",
-    "    ax.set_xlim(x_range)\n",
+    "    ax.set_xlabel(\"Energy (ADU) [{} bins per {} ADU]\".format(event_bins, b_range[1]-b_range[0]))\n",
+    "    ax.set_ylabel(\"Corrected Events for One Sequence (counts)\")\n",
+    "    ax.set_xlim(b_range)\n",
     "\n",
     "    h = 0\n",
     "    for double in doubles:\n",
@@ -1331,13 +1377,6 @@
     "    l = ax.legend()"
    ]
   },
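The e[:-1] bookkeeping in the cell above can be demonstrated standalone. This is a minimal sketch using plain numpy and matplotlib, independent of the notebook:

```python
# Minimal demonstration (not notebook code): np.histogram returns n counts
# and n + 1 bin edges, hence e[:-1] in the step plots above.
import numpy as np
import matplotlib.pyplot as plt

values = np.random.default_rng(1).normal(100.0, 10.0, size=10_000)
h, e = np.histogram(values, bins=50, range=(0.0, 200.0))
assert e.size == h.size + 1  # the edges array carries one extra element

fig, ax = plt.subplots()
ax.step(e[:-1], h)           # drop the last edge so x and y sizes match
ax.semilogy()                # log counts, linear energy axis
ax.set_xlabel("Energy (ADU)")
ax.set_ylabel("Counts")
plt.show()
```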
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
   {
    "cell_type": "code",
    "execution_count": null,
-- 
GitLab