diff --git a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
index ed69341365b1eeac08fc673bd985430fb6859552..788cb6cf3aa09cbf661701dbd52a39bab93d2d4a 100644
--- a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
+++ b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# pnCCD Dark Characterization\n",
     "\n",
-    "Author: DET Group, modified by Kiana Setoodehnia, Version: 4.0 (December 2020)\n",
+    "Author: DET Group, modified by Kiana Setoodehnia, Version: 5.0\n",
     "\n",
     "The following notebook provides dark image analysis of the pnCCD detector. Dark characterization evaluates offset and noise of the detector and gives information about bad pixels. \n",
     "\n",
@@ -22,12 +22,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:38.999974Z",
-     "start_time": "2018-12-06T10:54:38.983406Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\"  # input folder, required\n",
@@ -79,12 +74,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:39.467334Z",
-     "start_time": "2018-12-06T10:54:39.427784Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import datetime\n",
@@ -144,12 +134,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:40.058101Z",
-     "start_time": "2018-12-06T10:54:40.042615Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Calibration Database Settings, and Some Initial Run Parameters & Paths:\n",
@@ -238,12 +223,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:40.555804Z",
-     "start_time": "2018-12-06T10:54:40.452978Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Reading Parameters such as Detector Bias, Gain, etc. from the Data:\n",
@@ -280,12 +260,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:55:21.238009Z",
-     "start_time": "2018-12-06T10:54:54.586435Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "step_timer.start()\n",
@@ -327,12 +302,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:56:20.686534Z",
-     "start_time": "2018-12-06T10:56:11.721829Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "#************** HISTOGRAMS *******************#\n",
@@ -901,15 +871,10 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:56:22.741284Z",
-     "start_time": "2018-12-06T10:56:20.688393Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "step_time.start()\n",
+    "step_timer.start()\n",
     "constant_maps = {\n",
     "    'Offset': offsetMap[..., np.newaxis],\n",
     "    'Noise': noiseMapCM_2nd[..., np.newaxis],\n",
diff --git a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
index 5b715e5972993cc2b8cca21bdafcefe4ad930ed8..06f1b26657ee53bab62a8be2c6ed7c2c0f279372 100644
--- a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
+++ b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# pnCCD Data Correction #\n",
     "\n",
-    "Authors: DET Group, Modified by Kiana Setoodehnia on December 2020 - Version 4.0\n",
+    "Authors: DET Group, Modified by Kiana Setoodehnia - Version 5.0\n",
     "\n",
     "The following notebook provides offset, common mode, relative gain, split events and pattern classification corrections of images acquired with the pnCCD. This notebook *does not* yet correct for charge transfer inefficiency."
    ]
@@ -14,46 +14,41 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:23.218849Z",
-     "start_time": "2018-12-06T15:54:23.166497Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\" # input folder\n",
-    "out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/remove' # output folder\n",
-    "run = 347 # which run to read data from\n",
-    "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
+    "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\"  # input folder\n",
+    "out_folder = ''  # output folder\n",
+    "run = 347  # which run to read data from\n",
+    "sequences = [-1]  # sequences to correct, set to -1 for all, range allowed\n",
+    "sequences_per_node = 1  # number of sequences running on the same slurm node.\n",
     "\n",
-    "db_module = \"pnCCD_M205_M206\"\n",
     "karabo_da = 'PNCCD01' # data aggregators\n",
     "karabo_da_control = \"PNCCD02\" # file inset for control data\n",
     "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n",
     "receiver_id = \"PNCCD_FMT-0\" # inset for receiver devices\n",
     "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data\n",
     "path_template_seqs = \"{}/r{:04d}/*PNCCD01-S*.h5\"\n",
-    "h5path = '{}/CAL/{}:output' # path to data in the HDF5 file \n",
-    "h5path_ctrl = '{}/CTRL/TCTRL'\n",
+    "instrument_source_template = '{}/CAL/{}:output' # path to data in the HDF5 file \n",
+    "control_source_template = '{}/CTRL/TCTRL'  # control source template\n",
+    "mdl_source_template = \"{}/MDL/{}\"  # middlelayer source template\n",
     "\n",
-    "overwrite = True # keep this as True to not overwrite the output \n",
-    "use_dir_creation_date = True # To obtain creation time of the run\n",
-    "number_dark_frames = 0 # number of images to be used, if set to 0 all available images are used\n",
-    "chunk_size_idim = 1 # H5 chunking size of output data\n",
+    "overwrite = True  # keep this as True to not overwrite the output \n",
     "\n",
-    "cpuCores = 40\n",
+    "# Parameters affecting data correction.\n",
     "commonModeBlockSize = [512, 512] # size of the detector in pixels for common mode calculations\n",
     "commonModeAxis = 0 # axis along which common mode will be calculated, 0 = row, and 1 = column \n",
     "split_evt_primary_threshold = 4. # primary threshold for split event classification in terms of n sigma noise\n",
     "split_evt_secondary_threshold = 3. # secondary threshold for split event classification in terms of n sigma noise\n",
-    "sequences_per_node = 1\n",
-    "limit_images = 0\n",
+    "saturated_threshold = 32000. # full well capacity in ADU\n",
+    "chunk_size_idim = 1  # H5 chunking size of output data\n",
+    "# ONLY FOR TESTING\n",
+    "limit_images = 0  # this parameter is used for limiting number of images to correct from a sequence file. ONLY FOR TESTING.\n",
     "\n",
-    "# pnCCD parameters:\n",
+    "# Conditions for retrieving calibration constants\n",
     "fix_temperature_top = 0. # fix temperature for top sensor in K, set to 0. to use value from slow data.\n",
     "fix_temperature_bot = 0. # fix temperature for bottom senspr in K, set to 0. to use value from slow data.\n",
-    "gain = 0.1 # the detector's gain setting, It is later read from file and this value is overwritten\n",
+    "gain = -1  # the detector's gain setting, It is later read from file and this value is overwritten\n",
     "bias_voltage = 0. # the detector's bias voltage. set to 0. to use value from slow data.\n",
     "integration_time = 70  # detector's integration time\n",
     "photon_energy = 1.6 # Al fluorescence in keV\n",
@@ -61,14 +56,16 @@
     "cal_db_interface = \"tcp://max-exfl016:8015\" # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00\n",
+    "use_dir_creation_date = True  # To obtain creation time of the run\n",
     "\n",
+    "# Booleans for selecting corrections to apply.\n",
     "only_offset = False # Only, apply offset.\n",
     "common_mode = True # Apply common mode correction\n",
     "relgain = True # Apply relative gain correction\n",
-    "cti = False # Apply charge transfer inefficiency correction (not implemented, yet)\n",
-    "pattern_classification = True # classify split events\n",
+    "pattern_classification = True  # classify split events\n",
     "\n",
-    "saturated_threshold = 32000. # full well capacity in ADU\n",
+    "# TODO: REMOVE\n",
+    "db_module = \"\"\n",
     "\n",
     "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n",
@@ -98,37 +95,32 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:23.455376Z",
-     "start_time": "2018-12-06T15:54:23.413579Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "import copy\n",
     "import datetime\n",
-    "import glob\n",
     "import os\n",
-    "import time\n",
-    "import traceback\n",
     "import warnings\n",
-    "import traceback\n",
-    "from datetime import timedelta\n",
+    "from pathlib import Path\n",
     "from typing import Tuple\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
     "import h5py\n",
     "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
     "import pasha as psh\n",
-    "from extra_data import H5File, RunDirectory\n",
-    "from iminuit import Minuit\n",
     "from IPython.display import Markdown, display\n",
+    "from extra_data import H5File, RunDirectory\n",
+    "from prettytable import PrettyTable\n",
     "\n",
     "%matplotlib inline\n",
-    "import numpy as np\n",
+    "prettyPlotting=True\n",
+    "\n",
     "import XFELDetAna.xfelprofiler as xprof\n",
-    "from cal_tools.pnccdlib import VALID_GAINS, extract_slow_data\n",
+    "from XFELDetAna import xfelpyanatools as xana\n",
+    "from XFELDetAna import xfelpycaltools as xcal\n",
+    "from XFELDetAna.plotting.util import prettyPlotting\n",
+    "from cal_tools import pnccdlib\n",
     "from cal_tools.tools import (\n",
     "    get_constant_from_db_and_time,\n",
     "    get_dir_creation_date,\n",
@@ -139,16 +131,9 @@
     "from cal_tools import h5_copy_except\n",
     "from iCalibrationDB import Conditions, ConstantMetaData, Constants, Detectors, Versions\n",
     "from iCalibrationDB.detectors import DetectorTypes\n",
-    "from prettytable import PrettyTable\n",
     "\n",
     "profiler = xprof.Profiler()\n",
-    "profiler.disable()\n",
-    "\n",
-    "from XFELDetAna import xfelpyanatools as xana\n",
-    "from XFELDetAna import xfelpycaltools as xcal\n",
-    "from XFELDetAna.plotting.util import prettyPlotting\n",
-    "\n",
-    "prettyPlotting=True"
+    "profiler.disable()"
    ]
   },
   {
@@ -173,9 +158,8 @@
     "print(file_loc)\n",
     "\n",
     "# Paths to the data:\n",
-    "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
-    "h5path = h5path.format(karabo_id, receiver_id)\n",
-    "print(\"HDF5 path to data: {}\\n\".format(h5path))\n",
+    "instrument_src = instrument_source_template.format(karabo_id, receiver_id)\n",
+    "print(f\"Instrument H5File source: {instrument_src}\\n\")\n",
     "\n",
     "# Run's creation time:\n",
     "if creation_time:\n",
@@ -212,6 +196,7 @@
     ")\n",
     "file_list = []\n",
     "print(f\"Processing a total of {total_sequences} sequence files:\")\n",
+    "# TODO: use extra data and sort it.\n",
     "for f in mapped_files[karabo_da].queue:\n",
     "    file_list.append(f)\n",
     "    print(f)"
@@ -223,25 +208,27 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# extract slow data\n",
+    "# extract control data\n",
     "if karabo_da_control:\n",
-    "    mdl_path = f\"{karabo_id}/MDL/{'{}'}\"\n",
-    "    ctrl_path = h5path_ctrl.format(karabo_id)\n",
-    "    ctrl_dc = RunDirectory(ped_dir).select(\n",
+    "    mdl_src = mdl_source_template.format(karabo_id, \"{}\")\n",
+    "    ctrl_src = control_source_template.format(karabo_id)\n",
+    "    #TODO sequences and select files\n",
+    "    ctrl_dc = RunDirectory(Path(in_folder) / f\"r{run:04d}\").select(\n",
     "        [\n",
-    "            [ctrl_path, \"*\"],\n",
-    "            [mdl_path.format(\"*\"), \"*\"],]\n",
+    "            [ctrl_src, \"*\"],\n",
+    "            [mdl_src.format(\"*\"), \"*\"],]\n",
     "    )\n",
+    "    ctrl_data = pnccdlib.PnccdCtrl(ctrl_dc, ctrl_src, mdl_src)\n",
+    "\n",
+    "    if bias_voltage == 0.:\n",
+    "        bias_voltage = ctrl_data.get_bias_voltage()\n",
+    "    if gain == -1:\n",
+    "        gain = ctrl_data.get_gain()\n",
+    "    if fix_temperature_top == 0:\n",
+    "        fix_temperature_top = ctrl_data.get_fix_temperature_top()\n",
+    "    if fix_temperature_bot == 0:\n",
+    "        fix_temperature_bot = ctrl_data.get_fix_temperature_bot()\n",
     "\n",
-    "    bias_voltage, gain, fix_temperature_top, fix_temperature_bot = extract_slow_data(  # noqa\n",
-    "        ctrl_dc,\n",
-    "        ctrl_path,\n",
-    "        mdl_path,\n",
-    "        bias_voltage,\n",
-    "        gain,\n",
-    "        fix_temperature_top,\n",
-    "        fix_temperature_bot,\n",
-    "    )\n",
     "# Printing the Parameters Read from the Data File:\n",
     "display(Markdown('### Detector Parameters'))\n",
     "print(f\"Bias voltage is {bias_voltage:0.1f} V.\")\n",
@@ -257,7 +244,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "gain_k = [k for k, v in VALID_GAINS.items() if v == gain][0]\n",
+    "gain_k = [k for k, v in pnccdlib.VALID_GAINS.items() if v == gain][0]\n",
     "if gain_k == 'a':\n",
     "    split_evt_mip_threshold = 1000. # MIP threshold in ADU for event classification (10 times average noise)\n",
     "\n",
@@ -336,36 +323,27 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:23.913269Z",
-     "start_time": "2018-12-06T15:54:23.868910Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Sensor size and block size definitions (important for common mode and other corrections):\n",
-    "\n",
-    "sensorSize = [pixels_x, pixels_y]\n",
     "run_parallel = False\n",
+    "memoryCells = 1 # pnCCD has 1 memory cell\n",
+    "sensorSize = [pixels_x, pixels_y]\n",
     "blockSize = [sensorSize[0]//2, sensorSize[1]//2] # sensor area will be analysed according to blockSize\n",
-    "xcal.defaultBlockSize = blockSize # for xcal.HistogramCalculators \n",
-    "memoryCells = 1 # pnCCD has 1 memory cell"
+    "xcal.defaultBlockSize = blockSize # for xcal.HistogramCalculators "
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:23.913269Z",
-     "start_time": "2018-12-06T15:54:23.868910Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer = StepTimer()\n",
+    "\n",
     "# Output Folder Creation:\n",
-    "os.makedirs(out_folder, exist_ok=True)"
+    "os.makedirs(out_folder, exist_ok=True if overwrite else False)"
    ]
   },
   {
@@ -428,12 +406,10 @@
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "metadata": {},
-   "outputs": [],
    "source": [
-    "## Constants retrieval"
+    "## Retrieving calibration constants"
    ]
   },
   {
@@ -442,8 +418,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "display(Markdown('### Dark Data Retrieval'))\n",
-    "\n",
+    "display(Markdown('### Dark constants retrieval'))\n",
+    "step_timer.start()\n",
     "db_parms = cal_db_interface, cal_db_timeout\n",
     "\n",
     "constants = get_dark(db_parms, bias_voltage, gain, integration_time,\n",
@@ -465,7 +441,8 @@
     "                       lut_label='Bad Pixel Value (ADU)', \n",
     "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
-    "                       title = 'Dark Bad Pixels Map')"
+    "                       title = 'Dark Bad Pixels Map')\n",
+    "step_timer.done_step(\"Dark constants retrieval\")"
    ]
   },
   {
@@ -475,7 +452,8 @@
    "outputs": [],
    "source": [
     "if corr_bools.get('relgain'):\n",
-    "    display(Markdown('We will now retrieve the relative gain map from the calibration database.'))\n",
+    "    step_timer.start()\n",
+    "    display(Markdown('### Relative gain constant retrieval'))\n",
     "    metadata = ConstantMetaData()\n",
     "    relgain = Constants.CCD(DetectorTypes.pnCCD).RelativeGain()\n",
     "    metadata.calibration_constant = relgain\n",
@@ -505,7 +483,8 @@
     "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                           panel_top_low_lim = 0.5, panel_top_high_lim = 1.5, panel_side_low_lim = 0.5, \n",
     "                           panel_side_high_lim = 1.5, \n",
-    "                           title = f'Relative Gain Map for pnCCD (Gain = 1/{int(gain)})')"
+    "                           title = f'Relative Gain Map for pnCCD (Gain = 1/{int(gain)})')\n",
+    "    step_timer.done_step(\"Relative gain constant retrieval\")"
    ]
   },
   {
@@ -515,7 +494,6 @@
    "outputs": [],
    "source": [
     "#************************ Calculators ************************#\n",
-    "\n",
     "if corr_bools.get('common_mode'):\n",
     "    # Common Mode Correction Calculator:\n",
     "    cmCorrection = xcal.CommonModeCorrection([pixels_x, pixels_y],\n",
@@ -534,8 +512,7 @@
     "                                                 split_evt_mip_threshold,\n",
     "                                                 tagFirstSingles=3, # track along y-axis, left to right (see \n",
     "                                                 nCells=memoryCells, # split_event.py file in pydetlib/lib/src/\n",
-    "                                                 cores=cpuCores,     # XFELDetAna/algorithms)\n",
-    "                                                 allowElongated=False,\n",
+    "                                                 allowElongated=False, # XFELDetAna/algorithms)\n",
     "                                                 blockSize=[pixels_x, pixels_y//2],\n",
     "                                                 parallel=run_parallel)\n",
     "\n",
@@ -547,7 +524,6 @@
     "                                                 split_evt_mip_threshold,\n",
     "                                                 tagFirstSingles=4, # track along y-axis, right to left\n",
     "                                                 nCells=memoryCells,\n",
-    "                                                 cores=cpuCores,\n",
     "                                                 allowElongated=False,\n",
     "                                                 blockSize=[pixels_x, pixels_y//2],\n",
     "                                                 parallel=run_parallel)\n",
@@ -565,12 +541,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:28.771629Z",
-     "start_time": "2018-12-06T15:54:28.346051Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "#***************** Histogram Calculators ******************#\n",
@@ -580,7 +551,6 @@
     "    bins=bins, \n",
     "    range=bin_range,\n",
     "    nCells=memoryCells,\n",
-    "    cores=cpuCores,\n",
     "    blockSize=blockSize,\n",
     "    parallel=run_parallel,\n",
     ")\n",
@@ -590,7 +560,6 @@
     "    bins=bins, \n",
     "    range=bin_range,\n",
     "    nCells=memoryCells, \n",
-    "    cores=cpuCores,\n",
     "    blockSize=blockSize,\n",
     "    parallel=run_parallel,\n",
     ")\n",
@@ -601,7 +570,6 @@
     "        bins=bins,\n",
     "        range=bin_range,\n",
     "        nCells=memoryCells,\n",
-    "        cores=cpuCores,\n",
     "        blockSize=blockSize,\n",
     "        parallel=run_parallel,\n",
     "    )\n",
@@ -613,7 +581,6 @@
     "        bins=bins,\n",
     "        range=bin_range,\n",
     "        nCells=memoryCells, \n",
-    "        cores=cpuCores,\n",
     "        blockSize=blockSize,\n",
     "        parallel=run_parallel,\n",
     "    )\n",
@@ -623,7 +590,6 @@
     "        bins=bins,\n",
     "        range=bin_range,\n",
     "        nCells=memoryCells,\n",
-    "        cores=cpuCores,\n",
     "        blockSize=blockSize,\n",
     "        parallel=run_parallel,\n",
     "    )\n",
@@ -634,17 +600,16 @@
     "        bins=bins,\n",
     "        range=bin_range,\n",
     "        nCells=memoryCells,\n",
-    "        cores=cpuCores,\n",
     "        blockSize=blockSize,\n",
     "        parallel=run_parallel,\n",
     "    )"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {
+    "tags": []
+   },
    "source": [
     "## Applying corrections to the raw data"
    ]
@@ -656,9 +621,13 @@
    "outputs": [],
    "source": [
     "def correct_train(wid, index, tid, data):\n",
-    "    \n",
-    "    data = np.squeeze(data[h5path][\"data.image\"].astype(np.float32))\n",
-    "    raw_data[..., index] = data\n",
+    "\n",
+    "    data = np.squeeze(data[instrument_src][\"data.image\"].astype(np.float32))\n",
+    "\n",
+    "    if seq_n == 0:\n",
+    "        raw_data[index, ...] = data\n",
+    "\n",
+    "    histCalRaw.fill(data)  # filling histogram with raw uncorrected data\n",
     "    # equating bad pixels' values to np.nan\n",
     "    # so that the pattern classifier ignores them:\n",
     "    # TODO: To clear this up. Is it on purpose to save corrected data with nans?\n",
@@ -666,8 +635,9 @@
     "\n",
     "    data -= offset # offset correction\n",
     "    # TODO: to clear this up. why save the badpixels map in the corrected data?\n",
-    "    bpix_data[..., index] = bpix\n",
-    "    off_data[..., index] = data\n",
+    "    bpix_data[index, ...] = bpix\n",
+    "    off_data[index, ...] = data\n",
+    "    histCalOffsetCor.fill(data) # filling histogram with offset corrected data\n",
     "\n",
     "    # cm: common mode\n",
     "    if corr_bools.get('common_mode'):\n",
@@ -675,21 +645,24 @@
     "        # common mode correction:\n",
     "        data = np.squeeze(cmCorrection.correct(\n",
     "            data,  # common mode correction\n",
-    "            cellTable=cell_table,\n",
+    "            cellTable=np.zeros(pixels_y, np.int32),\n",
     "        ))\n",
     "\n",
     "        # discarding events caused by saturated pixels:\n",
     "        # we equate these values to np.nan so that the pattern classifier ignores them:\n",
     "        data[data >= saturated_threshold] = np.nan\n",
-    "        cm_data[..., index] = data\n",
+    "        cm_data[index, ...] = data\n",
+    "        histCalCommonModeCor.fill(data) # filling histogram with common mode corrected data\n",
     "\n",
     "    if corr_bools.get('relgain'):\n",
     "        data /= rg  # relative gain correction\n",
-    "        rg_data[..., index] = rg.astype(np.float32) # TODO: Why saving the calibration constant data?\n",
-    "    rg_corr_data[..., index] = data\n",
+    "        rg_data[index, ...] = rg.astype(np.float32) # TODO: Why saving the calibration constant data?\n",
     "\n",
-    "    if corr_bools.get('pattern_class'):\n",
+    "    if seq_n == 0:\n",
+    "        rg_corr_data[index, ...] = data\n",
+    "    histCalGainCor.fill(data)  # filling histogram with gain corrected data\n",
     "\n",
+    "    if corr_bools.get('pattern_class'):\n",
     "        # Dividing the data into left and right hemispheres:\n",
     "        dataLH = data[:, :pixels_x//2]\n",
     "        dataRH = data[:, pixels_x//2:]\n",
@@ -699,13 +672,17 @@
     "\n",
     "        data[:, :pixels_x//2] = np.squeeze(dataLH)\n",
     "        data[:, pixels_x//2:] = np.squeeze(dataRH)\n",
+    "\n",
     "        patterns = np.zeros(data.shape, patternsLH.dtype)\n",
     "        patterns[:, :pixels_x//2] = np.squeeze(patternsLH)\n",
     "        patterns[:, pixels_x//2:] = np.squeeze(patternsRH)\n",
     "\n",
     "        data[data < split_evt_primary_threshold*noise] = 0\n",
-    "        cls_data[..., index] = np.squeeze(data)\n",
-    "        ptrn_data[..., index] = np.squeeze(patterns)"
+    "        cls_data[index, ...] = np.squeeze(data)\n",
+    "        histCalPcorr.fill(data) # filling histogram with split events corrected data\n",
+    "        ptrn_data[index, ...] = np.squeeze(patterns)\n",
+    "        data[patterns != 100] = np.nan # Discard doubles, triples, quadruple, clusters, first singles\n",
+    "        histCalPcorrS.fill(data) # filling histogram with singles events data"
    ]
   },
   {
@@ -714,181 +691,156 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "step_timer = StepTimer()\n",
     "# 10 is a number chosen after testing 1 ... 71 parallel threads\n",
     "parallel_num_threads = 10\n",
+    "context = psh.context.ThreadContext(num_workers=parallel_num_threads)\n",
     "\n",
-    "# Data corrections and event classifications happen here.\n",
-    "# Also, the corrected data are written to datasets:\n",
+    "plt_uncor = None\n",
+    "\n",
+    "data_path = \"INSTRUMENT/\"+instrument_src+\"/data/\"\n",
     "\n",
     "offset = np.squeeze(constants[\"Offset\"])\n",
     "noise = np.squeeze(constants[\"Noise\"])\n",
     "bpix = np.squeeze(constants[\"BadPixelsDark\"])\n",
     "rg = constants.get(\"RelativeGain\")\n",
     "\n",
-    "cell_table = np.zeros(1024, np.int32)\n",
+    "corr_comp_fields = {}\n",
+    "kw = {\"compression\": \"gzip\"}\n",
+    "comp_fields = [\"gain\", \"patterns\", \"pixels_classified\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def write_dataset(ofile, data, field, kw={}):\n",
+    "    ofile.create_dataset(\n",
+    "        f\"{data_path}/{field}\",\n",
+    "        data=data,\n",
+    "        chunks=(chunk_size_idim, pixels_x, pixels_y),\n",
+    "        dtype=data.dtype,\n",
+    "        **kw,\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Data corrections and event classifications happen here.\n",
+    "# Also, the corrected data are written to datasets:\n",
+    "for seq_n, seq_f in enumerate(file_list):\n",
+    "    f_dc = H5File(seq_f)\n",
+    "    out_fileb = f\"{out_folder}/{seq_f.split('/')[-1]}\"\n",
+    "    out_file = out_fileb.replace(\"RAW\", \"CORR\")\n",
     "\n",
-    "plt_uncor = None\n",
+    "    step_timer.start()\n",
+    "    dshape = f_dc[instrument_src, \"data.image\"].shape\n",
+    "    n_imgs = dshape[0]\n",
+    "    # If you want to analyze only a certain number of frames\n",
+    "    # instead of all available good frames.\n",
+    "    if limit_images > 0:\n",
+    "        n_imgs = min(n_imgs, limit_images)\n",
+    "    data_shape = (n_imgs, dshape[1], dshape[2])\n",
+    "    print(f\"Correcting file: {seq_f} of shape {data_shape}.\")\n",
+    "\n",
+    "    data_dc = f_dc.select(\n",
+    "        instrument_src, \"data.image\",\n",
+    "        require_all=True).select_trains(np.s_[:n_imgs])\n",
+    "    \n",
+    "    # Allocating shared arrays for data arrays for each correction stage.\n",
     "\n",
-    "for k, f in enumerate(file_list):\n",
-    "    f_dc = H5File(f)\n",
-    "    out_fileb = f\"{out_folder}/{f.split('/')[-1]}\"\n",
-    "    out_file = out_fileb.replace(\"RAW\", \"CORR\")\n",
+    "    off_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
+    "    bpix_data = context.alloc(shape=data_shape, dtype=np.uint32)\n",
     "\n",
-    "    context = psh.context.ThreadContext(num_workers=parallel_num_threads)\n",
-    "    try:\n",
-    "        step_timer.start()\n",
-    "        dshape = f_dc[h5path, \"data.image\"].shape\n",
-    "        n_imgs = dshape[0]\n",
-    "        # If you want to analyze only a certain number of frames\n",
-    "        # instead of all available good frames.\n",
-    "        if limit_images > 0:\n",
-    "            n_imgs = limit_images\n",
-    "        print(f\"Correcting file: {f} of shape {(n_imgs,) + dshape[-2:]}.\")\n",
-    "\n",
-    "        data_dc = f_dc.select(\n",
-    "            h5path, \"data.image\",\n",
-    "            require_all=True).select_trains(np.s_[:n_imgs])\n",
-    "\n",
-    "        # Allocating shared arrays for data arrays for each correction stage.\n",
-    "        raw_data = context.alloc(\n",
-    "            shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
-    "\n",
-    "        off_data = context.alloc(\n",
-    "            shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
-    "        bpix_data = context.alloc(\n",
-    "            shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
+    "    if corr_bools.get('common_mode'):\n",
+    "        cm_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
     "\n",
-    "        if corr_bools.get('common_mode'):\n",
-    "            cm_data = context.alloc(\n",
-    "                shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
+    "    if corr_bools.get('relgain'):\n",
+    "        rg_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
+    "        \n",
+    "\n",
+    "    if corr_bools.get('pattern_class'):\n",
+    "        cls_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
+    "        ptrn_data = context.alloc(shape=data_shape, dtype=np.int32)\n",
+    "\n",
+    "    # Only for plotting\n",
+    "    if seq_n == 0:\n",
+    "        raw_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
     "        if corr_bools.get('relgain'):\n",
-    "            rg_data = context.alloc(\n",
-    "                shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
     "            rg_corr_data = context.alloc(\n",
-    "                shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
-    "        if corr_bools.get('pattern_class'):\n",
-    "            cls_data = context.alloc(\n",
-    "                shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
-    "            ptrn_data = context.alloc(\n",
-    "                shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
-    "        final_cor = context.alloc(\n",
-    "            shape=(dshape[1], dshape[2], n_imgs), dtype=np.float32)\n",
-    "\n",
-    "        # data set to save split event corrected images:\n",
-    "        # The calculation of the cluster map:\n",
-    "        step_timer.done_step(f'Preparation.')\n",
-    "        step_timer.start()\n",
-    "        context.map(\n",
-    "            correct_train,\n",
-    "            data_dc,\n",
-    "        )\n",
-    "        \n",
-    "        # Filling histogram calculators.\n",
-    "        histCalRaw.fill(raw_data)  # filling histogram with raw uncorrected data\n",
-    "        histCalOffsetCor.fill(off_data) # filling histogram with offset corrected data\n",
+    "                shape=data_shape, dtype=np.float32)\n",
+    "\n",
+    "    # data set to save split event corrected images:\n",
+    "    # The calculation of the cluster map:\n",
+    "    step_timer.done_step(f'Preparing shared arrays.')\n",
+    "\n",
+    "    step_timer.start()\n",
+    "\n",
+    "    context.map(correct_train, data_dc)\n",
+    "\n",
+    "    step_timer.done_step(f'Correction.')\n",
+    "\n",
+    "    step_timer.start()\n",
+    "    corr_arrays = {\n",
+    "        \"pixels\": off_data.astype(np.float32),\n",
+    "        \"mask\": bpix_data.astype(np.uint32), \n",
+    "    }\n",
+    "\n",
+    "    if seq_n == 0:\n",
+    "        # Offset corrected images for first sequence only:\n",
+    "        off_cor = off_data.copy()\n",
+    "        # cm: common mode\n",
     "        if corr_bools.get('common_mode'):\n",
-    "            histCalCommonModeCor.fill(cm_data) # filling histogram with common mode corrected data\n",
+    "            # common mode corrected images for first sequence only:\n",
+    "            cm_cor = cm_data.copy()\n",
+    "            corr_arrays[\"pixels_cm\"] = cm_data.astype(np.float32)\n",
+    "\n",
     "        if corr_bools.get('relgain'):\n",
-    "            histCalGainCor.fill(rg_corr_data)  # filling histogram with gain corrected data\n",
-    "        if corr_bools.get('pattern_class'):    \n",
-    "            histCalPcorr.fill(cls_data) # filling histogram with split events corrected data\n",
-    "            cls_data[ptrn_data != 100] = np.nan # Discard doubles, triples, quadruple, clusters, first singles\n",
-    "            histCalPcorrS.fill(cls_data) # filling histogram with singles events data\n",
-    "\n",
-    "        step_timer.done_step(f'Correction.')\n",
-    "\n",
-    "        step_timer.start()\n",
-    "\n",
-    "        if k == 0:\n",
-    "            plt_uncor = raw_data\n",
-    "            # Offset corrected images for first sequence only:\n",
-    "            off_cor = np.copy(off_data)\n",
-    "            final_cor = off_cor\n",
-    "            # cm: common mode\n",
-    "            if corr_bools.get('common_mode'):\n",
-    "                # common mode corrected images for first sequence only:\n",
-    "                cm_cor = np.copy(cm_data)\n",
-    "                final_cor = cm_cor\n",
-    "            if corr_bools.get('relgain'):\n",
-    "                # gain corrected images for first sequence only:\n",
-    "                g_cor = np.copy(rg_corr_data)\n",
-    "                final_cor = g_cor\n",
-    "            if corr_bools.get('pattern_class'):\n",
-    "                # split event corrected images for first sequence only\n",
-    "                # (also these events are only singles events):\n",
-    "                final_cor = cls_data\n",
-    "        with h5py.File(out_file, 'w') as ofile:\n",
-    "            # Copy RAW non-calibrated sources.\n",
-    "            with h5py.File(f, 'r') as sfile:\n",
-    "                h5_copy_except.h5_copy_except_paths(\n",
-    "                    sfile, ofile,\n",
-    "                    [\"INSTRUMENT/\"+h5path+\"/data/image\"],\n",
-    "                )\n",
-    "            # TODO: to clear this up: why save corrected data in data/pixels rather than data/image.\n",
-    "            # corr_run : /gpfs/exfel/d/proc/SQS/202131/p900236/r0044/CORR-R0044-PNCCD01-S00000.h5 still has data/image. most probably a bug.\n",
-    "            # data set to save offset corrected images:\n",
-    "            ddset = ofile.create_dataset(\n",
-    "                \"INSTRUMENT/\"+h5path+\"/data/pixels\",\n",
-    "                data=np.moveaxis(off_data, 2, 0),\n",
-    "                chunks=(chunk_size_idim, dshape[1], dshape[2]),\n",
-    "                dtype=np.float32,\n",
-    "            )\n",
-    "            # data set to create bad pixels:\n",
-    "            ddsetm = ofile.create_dataset(\n",
-    "                \"INSTRUMENT/\"+h5path+\"/data/mask\",\n",
-    "                data=np.moveaxis(bpix_data, 2, 0),\n",
-    "                chunks=(chunk_size_idim, dshape[1], dshape[2]),\n",
-    "                dtype=np.uint32,\n",
-    "                compression=\"gzip\",\n",
-    "            )\n",
-    "            # cm: common mode\n",
-    "            if corr_bools.get('common_mode'):\n",
-    "                # data set to save common mode corrected images:\n",
-    "                ddsetcm = ofile.create_dataset(\n",
-    "                    \"INSTRUMENT/\"+h5path+\"/data/pixels_cm\",\n",
-    "                    data=np.moveaxis(cm_data, 2, 0),\n",
-    "                    chunks=(chunk_size_idim, dshape[1], dshape[2]),\n",
-    "                    dtype=np.float32,\n",
-    "                )\n",
-    "            if corr_bools.get('relgain'):\n",
-    "                # data set to save gain corrected images:\n",
-    "                ddsetg = ofile.create_dataset(\n",
-    "                    \"INSTRUMENT/\"+h5path+\"/data/gain\",\n",
-    "                    data=np.moveaxis(rg_data, 2, 0),\n",
-    "                    chunks=(chunk_size_idim, dshape[1], dshape[2]),\n",
-    "                    dtype=np.float32,\n",
-    "                    compression=\"gzip\",\n",
-    "                )\n",
-    "            data = corr_data\n",
-    "\n",
-    "            if corr_bools.get('pattern_class'):\n",
-    "                # c: classifications, p: even patterns\n",
-    "                ddsetc = ofile.create_dataset(\n",
-    "                    \"INSTRUMENT/\"+h5path+\"/data/pixels_classified\",\n",
-    "                    data=np.moveaxis(cls_data, 2, 0),\n",
-    "                    chunks=(chunk_size_idim, dshape[1], dshape[2]),\n",
-    "                    dtype=np.float32,\n",
-    "                    compression=\"gzip\",\n",
-    "                )\n",
-    "                # data set to save different valid patterns:\n",
-    "                ddsetp = ofile.create_dataset(\n",
-    "                    \"INSTRUMENT/\"+h5path+\"/data/patterns\",\n",
-    "                    data=np.moveaxis(ptrn_data, 2, 0),\n",
-    "                    chunks=(chunk_size_idim, dshape[1], dshape[2]),\n",
-    "                    dtype=np.int32,\n",
-    "                    compression=\"gzip\",\n",
-    "                )\n",
-    "        step_timer.done_step(f'Storing data.')\n",
-    "        \"\"\"\n",
-    "    except Exception as e:\n",
-    "        print(f\"Couldn't calibrate data in {f}: {e}\\n\")\n",
-    "        traceback.print_exc(limit=1)\n",
+    "            # gain corrected images for first sequence only:\n",
+    "            g_cor = rg_corr_data.copy()\n",
+    "            corr_arrays[\"gain\"] = rg_data.astype(np.float32)\n",
     "\n",
+    "        if corr_bools.get('pattern_class'):\n",
+    "            # split event corrected images for first sequence only\n",
+    "            # (also these events are only singles events):\n",
+    "            cls_cor = cls_data.copy()\n",
+    "            corr_arrays[\"pixels_classified\"] = cls_data.astype(np.float32)\n",
+    "            corr_arrays[\"patterns\"] = ptrn_data.astype(np.int32)\n",
+    "\n",
+    "    # Storing corrected data sources.\n",
+    "    with h5py.File(out_file, 'w') as ofile:\n",
+    "        # Copy RAW non-calibrated sources.\n",
+    "        with h5py.File(f, 'r') as sfile:\n",
+    "            h5_copy_except.h5_copy_except_paths(\n",
+    "                sfile, ofile,\n",
+    "                [\"INSTRUMENT/\"+instrument_src+\"/data/image\"],\n",
+    "            )\n",
+    "        for field, arr in corr_arrays.items():\n",
+    "            write_dataset(\n",
+    "                ofile, arr, field,\n",
+    "                kw=kw if field in comp_fields else {})\n",
+    "        # TODO: to clear this up: why save corrected data in data/pixels rather than data/image.\n",
+    "    step_timer.done_step(f'Storing data.')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "print(\"In addition to offset correction, the following corrections were performed:\")\n",
     "for k, v in corr_bools.items():\n",
     "    if v:\n",
-    "        print(k)"
+    "        print(\"  -\", k.upper())\n",
+    "\n",
+    "print(f\"Total processing time {step_timer.timespan():.01f} s\")\n",
+    "step_timer.print_summary()"
    ]
   },
   {
@@ -1001,7 +953,7 @@
     "fig = xana.simplePlot(figure, aspect=1, x_label='ADU', y_label='Number of Occurrences', figsize='2col',\n",
     "                      y_log=True, x_range=bin_range, title = '1 ADU per bin is used.',\n",
     "                      legend='top-right-frame-1col')\n",
-    "step_timer.done_step('Plotting (Raw vs. Corrected Spectra.)')"
+    "step_timer.done_step('Plotting')"
    ]
   },
   {
@@ -1085,8 +1037,9 @@
    "outputs": [],
    "source": [
     "display(Markdown('### Classification Results - Tabulated Statistics'))\n",
-    "step_timer.start()\n",
+    "\n",
     "if corr_bools.get('pattern_class'):\n",
+    "    step_timer.start()\n",
     "    t0 = PrettyTable()\n",
     "    t0.title = \"Total Number of Counts after All Corrections\"\n",
     "    t0.field_names = [\"Hemisphere\", \"Singles\", \"First-Singles\", \"Clusters\"]\n",
@@ -1106,18 +1059,13 @@
     "    t1.add_row([3, patternStatsLH['doubles'][3], patternStatsRH['doubles'][3], patternStatsLH['triples'][3], \n",
     "                patternStatsRH['triples'][3], patternStatsLH['quads'][3], patternStatsRH['quads'][3]])\n",
     "    print(t1)\n",
-    "step_timer.done_step('Classification Results - Tabulated Statistics')"
+    "    step_timer.done_step('Classification Results - Tabulated Statistics')"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:10:56.190150Z",
-     "start_time": "2018-12-06T16:10:56.177570Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "if corr_bools.get('pattern_class'):\n",
@@ -1152,18 +1100,13 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:10:56.203219Z",
-     "start_time": "2018-12-06T16:10:56.191509Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "display(Markdown('### Classification Results - Pie Charts'))\n",
-    "step_timer.start()\n",
     "\n",
     "if corr_bools.get('pattern_class'):\n",
+    "    step_timer.start()\n",
     "    fig = plt.figure(figsize=(12, 7))\n",
     "    ax = fig.add_subplot(1, 2, 1)\n",
     "    labels = ['Singles', 'Doubles', 'Triples', 'Quads']\n",
@@ -1176,7 +1119,7 @@
     "    ax.set_title(\"Pattern Occurrence in RH\")\n",
     "    # Set aspect ratio to be equal so that pie is drawn as a circle.\n",
     "    a = ax.axis('equal')\n",
-    "step_timer.done_step('Classification Results - Pie Charts')"
+    "    step_timer.done_step('Classification Results - Pie Charts')"
    ]
   },
   {
@@ -1189,23 +1132,20 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:10:56.212586Z",
-     "start_time": "2018-12-06T16:10:56.204731Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "uncor_mean_im = np.nanmean(plt_uncor, axis=2)\n",
-    "offset_mean_im = np.nanmean(off_cor, axis=2)\n",
+    "step_timer.start()\n",
+    "\n",
+    "uncor_mean_im = np.nanmean(raw_data, axis=0)\n",
+    "offset_mean_im = np.nanmean(off_cor, axis=0)\n",
     "\n",
     "if corr_bools.get('common_mode'):\n",
-    "    cm_mean_im = np.nanmean(cm_cor, axis=2)\n",
+    "    cm_mean_im = np.nanmean(cm_cor, axis=0)\n",
     "if corr_bools.get('relgain'):\n",
-    "    gain_mean_im = np.nanmean(g_cor, axis=2)\n",
+    "    gain_mean_im = np.nanmean(g_cor, axis=0)\n",
     "if corr_bools.get('pattern_class'):\n",
-    "    mean_im_cc = np.nanmean(final_cor, axis=2)\n",
+    "    mean_im_cc = np.nanmean(cls_cor, axis=0)\n",
     "\n",
     "fig = xana.heatmapPlot(uncor_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x),  \n",
@@ -1233,7 +1173,8 @@
     "if corr_bools.get('pattern_class'):\n",
     "    fig = xana.heatmapPlot(mean_im_cc, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
     "                           x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0, vmax= 18000,\n",
-    "                           title = 'Image of Single Events Averaged over Frames in the First Sequence')"
+    "                           title = 'Image of Single Events Averaged over Frames in the First Sequence')\n",
+    "step_timer.done_step(\"Plotting\")"
    ]
   },
   {
@@ -1246,43 +1187,40 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:11:08.317130Z",
-     "start_time": "2018-12-06T16:11:05.788655Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "fig = xana.heatmapPlot(plt_uncor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
+    "step_timer.start()\n",
+    "fig = xana.heatmapPlot(raw_data[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',  \n",
     "                       title = 'Uncorrected Image (First Frame of the First Sequence)')\n",
     "\n",
-    "fig = xana.heatmapPlot(off_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
+    "fig = xana.heatmapPlot(off_cor[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',   \n",
     "                       title = 'Offset Corrected Image (First Frame of the First Sequence)')\n",
     "\n",
     "if corr_bools.get('common_mode'):\n",
-    "        fig = xana.heatmapPlot(cm_cor[:,:,2], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
+    "        fig = xana.heatmapPlot(cm_cor[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
     "                               aspect=1, \n",
     "                               x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                               panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                               title = 'Common Mode Corrected Image (First Frame of the First Sequence)')\n",
     "        \n",
     "if corr_bools.get('relgain'):\n",
-    "        fig = xana.heatmapPlot(g_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
+    "        fig = xana.heatmapPlot(g_cor[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
     "                               aspect=1, \n",
     "                               x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                               panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                               title = 'Gain Corrected Image (First Frame of the First Sequence)')\n",
     "\n",
     "if corr_bools.get('pattern_class'):    \n",
-    "    fig = xana.heatmapPlot(final_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
+    "    fig = xana.heatmapPlot(cls_cor[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
     "                           x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',  \n",
-    "                           title = 'Image of Single Events (First Frame of the First Sequence)')"
+    "                           title = 'Image of Single Events (First Frame of the First Sequence)')\n",
+    "step_timer.done_step(\"Plotting\")"
    ]
   },
   {
@@ -1328,8 +1266,8 @@
     "    with H5File(\n",
     "        f\"{out_folder}/{path_template.format(run, karabo_da, sequences[0]).replace('RAW', 'CORR')}\"\n",
     "    ) as dc:\n",
-    "        data = dc[h5path, \"data.pixels_classified\"].ndarray()\n",
-    "        patterns = dc[h5path, \"data.patterns\"].ndarray()\n",
+    "        data = dc[instrument_src, \"data.pixels_classified\"].ndarray()\n",
+    "        patterns = dc[instrument_src, \"data.patterns\"].ndarray()\n",
     "    # events' patterns indices are as follows: 100 (singles), 101 (first singles), 200 - 203 (doubles),\n",
     "    # 300 - 303 (triples), and 400 - 403 (quadruples). Note that for the last three types of patterns, \n",
     "    # there are left, right, up, and down indices.\n",
@@ -1337,23 +1275,23 @@
     "    # Separating the events:\n",
     "    # Singles and First Singles:\n",
     "    for s in range(100, 102):\n",
-    "        single = copy.copy(data[...])\n",
+    "        single = data.copy()\n",
     "        single[patterns != s] = np.nan\n",
     "        singles.append(single)\n",
     "\n",
     "\n",
     "    for d in range(200, 204):\n",
-    "        double = copy.copy(data[...])\n",
+    "        double = data.copy()\n",
     "        double[patterns != d] = np.nan\n",
     "        doubles.append(double)\n",
     "\n",
     "    for t in range(300, 304):\n",
-    "        triple = copy.copy(data[...])\n",
+    "        triple = data.copy()\n",
     "        triple[patterns != t] = np.nan\n",
     "        triples.append(triple)  \n",
     "\n",
     "    for q in range(400, 404):\n",
-    "        quad = copy.copy(data[...])\n",
+    "        quad = data.copy()\n",
     "        quad[patterns != q] = np.nan\n",
     "        quads.append(quad)"
    ]
@@ -1365,6 +1303,7 @@
    "outputs": [],
    "source": [
     "if corr_bools.get('pattern_class'):\n",
+    "    step_timer.start()\n",
     "    hA = 0\n",
     "    h = 0\n",
     "    for single in singles:\n",
@@ -1409,7 +1348,8 @@
     "    ax.step(e[:-1], h, color='purple', label='Events Splitting on Quadruple Pixels')\n",
     "\n",
     "    ax.step(e[:-1], hA, color='grey', label='All Valid Events')\n",
-    "    l = ax.legend()"
+    "    l = ax.legend()\n",
+    "    step_timer.done_step(\"Plotting\")"
    ]
   },
   {
@@ -1436,7 +1376,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.7"
+   "version": "3.8.11"
   },
   "latex_envs": {
    "LaTeX_envs_menu_present": true,
@@ -1457,5 +1397,5 @@
   }
  },
  "nbformat": 4,
- "nbformat_minor": 1
+ "nbformat_minor": 4
 }