diff --git a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
index b23ef32bfcfde18bba57315e5f93fe120e7b07df..cb1e13f64cfc762a9080a2c63c551e93b582d8ea 100644
--- a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
+++ b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# pnCCD Dark Characterization\n",
     "\n",
-    "Author: DET Group, modified by Kiana Setoodehnia, Version: 4.0 (December 2020)\n",
+    "Author: DET Group, modified by Kiana Setoodehnia, Version: 5.0\n",
     "\n",
     "The following notebook provides dark image analysis of the pnCCD detector. Dark characterization evaluates offset and noise of the detector and gives information about bad pixels. \n",
     "\n",
@@ -22,31 +22,21 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:38.999974Z",
-     "start_time": "2018-12-06T10:54:38.983406Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "cluster_profile = \"noDB\"  # ipcluster profile to use\n",
     "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\"  # input folder, required\n",
-    "out_folder = '/gpfs/exfel/data/scratch/setoodeh'  # output folder, required\n",
-    "sequence = 0  # sequence file to use\n",
+    "out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/remove'  # output folder, required\n",
     "run = 339 # which run to read data from, required\n",
     "\n",
-    "# Data files parameters:\n",
-    "db_module = \"pnCCD_M205_M206\" # the device name for pnCCD detector\n",
+    "# Data files parameters.\n",
     "karabo_da = ['PNCCD01'] # data aggregators\n",
-    "karabo_da_control = \"PNCCD02\" # file inset for control data\n",
     "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n",
     "receiver_id = \"PNCCD_FMT-0\" # inset for receiver devices\n",
     "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5'  # the template to use to access data\n",
-    "h5path = '/INSTRUMENT/{}/CAL/{}:output/data/image/' # path in the HDF5 file the data is at\n",
-    "h5path_ctrl = '/CONTROL/{}/CTRL/TCTRL'\n",
+    "instrument_source_template = '{}/CAL/{}:output'  # data source path in h5file. Template filled with karabo_id and receiver_id\n",
     "\n",
-    "# Database access parameters:\n",
+    "# Database access parameters.\n",
     "use_dir_creation_date = True  # use dir creation date as data production reference date\n",
     "cal_db_interface = \"tcp://max-exfl016:8021\"  # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
@@ -54,51 +44,52 @@
     "local_output = True # if True, the notebook saves dark constants locally\n",
     "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.00 e.g. 2019-07-04 11:02:41.00\n",
     "\n",
-    "number_dark_frames = 0  # number of images to be used, if set to 0 all available images are used\n",
-    "chunkSize = 100 # number of images to read per chunk\n",
+    "# Parameters used for injecting calibration constants.\n",
+    "set_parameters_manually = False  # Boolean for setting parameter values manually instead of reading control values.\n",
     "fix_temperature_top = 0.  # fix temperature of top pnCCD sensor in K. Set to 0, to use the value from slow data\n",
     "fix_temperature_bot = 0.  # fix temperature of bottom pnCCD sensor in K. Set to 0, to use the value from slow data\n",
-    "gain = 0.1  # the detector's gain setting. Set to 0.1 to use the value from the slow data\n",
+    "temp_limits = 5  # temperature limits in which calibration parameters are considered equal\n",
+    "gain = -1  # the detector's gain setting. Set to -1 to use the value from the slow data.\n",
     "bias_voltage = 0. # the detector's bias voltage. set to 0. to use the value from slow data.\n",
-    "integration_time = 70  # detector's integration time\n",
+    "integration_time = 70  # detector's integration time.\n",
+    "\n",
+    "# Parameters affecting dark constants calculation.\n",
     "commonModeAxis = 0 # axis along which common mode will be calculated (0: along rows, 1: along columns)\n",
     "commonModeBlockSize = [512, 512] # size of the detector in pixels for common mode calculations\n",
     "sigmaNoise = 10.  # pixels whose signal value exceeds sigmaNoise*noise will be considered as cosmics and are masked\n",
     "bad_pixel_offset_sigma = 4.  # any pixel whose offset beyond this standard deviations is a bad pixel\n",
     "bad_pixel_noise_sigma = 4.  # any pixel whose noise beyond this standard deviations is a bad pixel\n",
-    "temp_limits = 5  # temperature limits in which calibration parameters are considered equal\n",
+    "max_trains = 500  # Maximum number of trains to use for dark processing.\n",
+    "min_trains = 1  # Minimum number of trains to proceed with dark processing.\n",
+    "\n",
+    "# Don't delete. myMDC sends this parameter by default.\n",
+    "operation_mode = ''  # Detector operation mode, optional\n",
     "\n",
-    "run_parallel = True # for parallel computation\n",
-    "cpuCores = 40 # specifies the number of running cpu cores\n",
-    "operation_mode = ''  # Detector operation mode, optional"
+    "# TODO: Remove unused parameter\n",
+    "db_module = \"\" # the device name for pnCCD detector"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:39.467334Z",
-     "start_time": "2018-12-06T10:54:39.427784Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "import copy\n",
     "import datetime\n",
     "import os\n",
     "import warnings\n",
     "\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
-    "import h5py\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
+    "import pasha as psh\n",
+    "from extra_data import RunDirectory\n",
     "\n",
     "%matplotlib inline\n",
-    "import XFELDetAna.xfelprofiler as xprof\n",
+    "from cal_tools import pnccdlib\n",
     "from cal_tools.enums import BadPixels\n",
-    "from cal_tools.pnccdlib import VALID_GAINS, extract_slow_data\n",
+    "from cal_tools.step_timing import StepTimer\n",
     "from cal_tools.tools import (\n",
     "    get_dir_creation_date,\n",
     "    get_pdu_from_db,\n",
@@ -107,41 +98,13 @@
     "    save_const_to_h5,\n",
     "    send_to_db,\n",
     ")\n",
-    "from iCalibrationDB import Conditions, Constants, Detectors, Versions\n",
+    "from iCalibrationDB import Conditions, Constants\n",
     "from iCalibrationDB.detectors import DetectorTypes\n",
     "from IPython.display import Markdown, display\n",
     "from prettytable import PrettyTable\n",
     "\n",
-    "profiler = xprof.Profiler()\n",
-    "profiler.disable()\n",
-    "from XFELDetAna.util import env\n",
-    "\n",
-    "env.iprofile = cluster_profile\n",
     "from XFELDetAna import xfelpyanatools as xana\n",
-    "from XFELDetAna import xfelpycaltools as xcal\n",
-    "from XFELDetAna.plotting.util import prettyPlotting\n",
-    "\n",
-    "prettyPlotting=True\n",
-    "from XFELDetAna.detectors.fastccd import readerh5 as fastccdreaderh5\n",
-    "from XFELDetAna.xfelreaders import ChunkReader"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:39.467334Z",
-     "start_time": "2018-12-06T10:54:39.427784Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "def nImagesOrLimit(nImages, limit):\n",
-    "    if limit == 0:\n",
-    "        return nImages\n",
-    "    else:\n",
-    "        return min(nImages, limit)"
+    "from XFELDetAna import xfelpycaltools as xcal"
    ]
   },
   {
@@ -162,12 +125,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:40.058101Z",
-     "start_time": "2018-12-06T10:54:40.042615Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Calibration Database Settings, and Some Initial Run Parameters & Paths:\n",
@@ -177,11 +135,8 @@
     "pixels_y = 1024 # number of columns of the pnCCD \n",
     "print(f\"pnCCD size is: {pixels_x}x{pixels_y} pixels.\")\n",
     "\n",
-    "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
-    "fp_name = path_template.format(run, karabo_da[0])\n",
-    "fp_path = '{}/{}'.format(ped_dir, fp_name)\n",
-    "filename = fp_path.format(sequence)\n",
-    "h5path = h5path.format(karabo_id, receiver_id)\n",
+    "instrument_src = instrument_source_template.format(\n",
+    "    karabo_id, receiver_id)\n",
     "\n",
     "# Output Folder Creation:\n",
     "os.makedirs(out_folder, exist_ok=True)\n",
@@ -206,8 +161,7 @@
     "cal_db_interface = get_random_db_interface(cal_db_interface)\n",
     "print(f'Calibration database interface: {cal_db_interface}')\n",
     "print(f\"Sending constants to the calibration database: {db_output}\")\n",
-    "print(f\"HDF5 path to data: {h5path}\")\n",
-    "print(f\"Reading data from: {filename}\")\n",
+    "print(f\"Instrument H5File source: {instrument_src}\")\n",
     "print(f\"Run number: {run}\")"
    ]
   },
@@ -217,38 +171,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Extracting slow data:\n",
-    "if karabo_da_control:\n",
-    "    ctrl_fname = os.path.join(ped_dir, path_template.format(run, karabo_da_control)).format(sequence)\n",
-    "    ctrl_path = h5path_ctrl.format(karabo_id)\n",
-    "    mdl_ctrl_path = f\"/CONTROL/{karabo_id}/MDL/\"\n",
-    "\n",
-    "    (bias_voltage, gain,\n",
-    "     fix_temperature_top,\n",
-    "     fix_temperature_bot) = extract_slow_data(karabo_id, karabo_da_control, ctrl_fname, ctrl_path,\n",
-    "                                              mdl_ctrl_path, bias_voltage, gain,\n",
-    "                                              fix_temperature_top, fix_temperature_bot)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:40.555804Z",
-     "start_time": "2018-12-06T10:54:40.452978Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "# Reading Parameters such as Detector Bias, Gain, etc. from the Data:\n",
-    "memoryCells = 1 # pnCCD has 1 memory cell\n",
-    "sensorSize = [pixels_x, pixels_y]\n",
-    "blockSize = [sensorSize[0]//2, sensorSize[1]//2]# sensor area will be analysed according to blocksize\n",
-    "xcal.defaultBlockSize = blockSize\n",
-    "nImages = fastccdreaderh5.getDataSize(filename, h5path)[0] # specifies total number of images to proceed\n",
-    "nImages = nImagesOrLimit(nImages, number_dark_frames)\n",
-    "profile = False"
+    "step_timer = StepTimer()"
    ]
   },
   {
@@ -257,50 +180,57 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
+    "\n",
+    "run_dc = RunDirectory(f\"{in_folder}/r{run:04d}\")\n",
+    "\n",
+    "# extract control data\n",
+    "if not set_parameters_manually:\n",
+    "    ctrl_data = pnccdlib.PnccdCtrl(run_dc, karabo_id)\n",
+    "    \n",
+    "    if bias_voltage == 0:\n",
+    "        bias_voltage = ctrl_data.get_bias_voltage()\n",
+    "    if gain == -1:\n",
+    "        gain = ctrl_data.get_gain()\n",
+    "    if fix_temperature_top == 0:\n",
+    "        fix_temperature_top = ctrl_data.get_fix_temperature_top()\n",
+    "    if fix_temperature_bot == 0:\n",
+    "        fix_temperature_bot = ctrl_data.get_fix_temperature_bot()\n",
+    "\n",
+    "\n",
     "# Printing the Parameters Read from the Data File:\n",
     "display(Markdown('### Detector Parameters'))\n",
-    "print(f\"Bias voltage is {bias_voltage:0.2f} V.\")\n",
-    "print(f\"Detector gain is set to {gain}.\")\n",
+    "print(f\"Bias voltage is {bias_voltage:0.1f} V.\")\n",
+    "print(f\"Detector gain is set to 1/{int(gain)}.\")\n",
     "print(f\"Detector integration time is set to {integration_time} ms\")\n",
     "print(f\"Top pnCCD sensor is at temperature of {fix_temperature_top:0.2f} K\")\n",
     "print(f\"Bottom pnCCD sensor is at temperature of {fix_temperature_bot:0.2f} K\")\n",
-    "print(\"Number of dark images to analyze:\", nImages) "
+    "step_timer.done_step(f'Reading control data.')"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:41.584031Z",
-     "start_time": "2018-12-06T10:54:41.578462Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "# Reading Files in Chunks:\n",
+    "# Reading Parameters such as Detector Bias, Gain, etc. from the Data:\n",
+    "sensorSize = [pixels_x, pixels_y]\n",
+    "# sensor area will be analysed according to blocksize\n",
+    "blockSize = [sensorSize[0]//2, sensorSize[1]//2]\n",
+    "xcal.defaultBlockSize = blockSize\n",
     "\n",
-    "# Chunk reader returns an iterator to access the data in the file within the ranges:\n",
-    "reader = ChunkReader(filename, fastccdreaderh5.readData, nImages, chunkSize, path=h5path, \n",
-    "                     pixels_x=pixels_x, pixels_y=pixels_y)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:41.899511Z",
-     "start_time": "2018-12-06T10:54:41.864816Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "# Calculators:\n",
+    "pixel_data = (instrument_src, \"data.image\")\n",
+    "data_dc = run_dc.select(*pixel_data, require_all=True)\n",
+    "n_trains = data_dc[pixel_data].shape[0]\n",
     "\n",
-    "# noiseCal is a noise map calculator, which internally also produces a per-pixel mean map, i.e., an offset map:\n",
-    "noiseCal = xcal.NoiseCalculator(sensorSize, memoryCells, cores=cpuCores, blockSize=blockSize,\n",
-    "                                runParallel=run_parallel)"
+    "if max_trains != 0:\n",
+    "    n_trains = min(n_trains, max_trains)\n",
+    "if n_trains < min_trains:\n",
+    "    raise ValueError(\n",
+    "        f\"Files {data_dc.files} consists of less than\"\n",
+    "        f\" the required number of {min_trains} trains to proceed with \"\n",
+    "        \"dark processing.\")"
    ]
   },
   {
@@ -315,55 +245,16 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:55:21.238009Z",
-     "start_time": "2018-12-06T10:54:54.586435Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
-    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
-    "chunkSize_new = 0 # See below\n",
-    "\n",
-    "for data in reader.readChunks():\n",
-    "    data = data.astype(np.float32)\n",
-    "    dx = np.count_nonzero(data, axis=(0, 1)) \n",
-    "    data = data[:,:,dx != 0] # Getting rid of empty frames\n",
-    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will \n",
-    "    # temporarily change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
-    "    if data.shape[2] < chunkSize:\n",
-    "        chunkSize_new = data.shape[2]\n",
-    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
-    "              .format(chunkSize_new))\n",
-    "        images = images + chunkSize_new\n",
-    "        counter2 += 1 \n",
-    "    else:\n",
-    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
-    "        counter1 += 1\n",
-    "\n",
-    "    noiseCal.fill(data) # Filling the histogram calculator with data\n",
-    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
-    "\n",
-    "print('A total number of {} images are processed.'.format(images))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:55:21.238009Z",
-     "start_time": "2018-12-06T10:54:54.586435Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "offsetMap = noiseCal.getOffset() # Producing offset map\n",
-    "noiseMap = noiseCal.get() # Producing noise map\n",
-    "noiseCal.reset() # Resetting noise calculator\n",
-    "print(\"Initial maps are created.\")"
+    "step_timer.start()\n",
+    "data = data_dc.select_trains(np.s_[:n_trains])[pixel_data].ndarray()\n",
+    "data = data.astype(np.float32)\n",
+    "\n",
+    "noiseMap = np.std(data, axis=0, dtype=np.float64)\n",
+    "offsetMap = np.mean(data, axis=0)\n",
+    "step_timer.done_step(f'Initial maps are created from {n_trains} trains.')"
    ]
   },
   {
@@ -373,7 +264,7 @@
    "outputs": [],
    "source": [
     "# pnCCD valid gains are 1, 1/4, 1/16, 1/64, 1/256, 1/340 and 1/512:\n",
-    "gain_k = [k for k, v in VALID_GAINS.items() if v == gain][0]\n",
+    "gain_k = [k for k, v in pnccdlib.VALID_GAINS.items() if v == gain][0]\n",
     "if gain_k == 'a' or gain_k == 'b':\n",
     "    xrange = (0, 200) # x-axis range for the noise histogram plots\n",
     "    bins = 2000 # number of bins for Histogram Calculators \n",
@@ -381,7 +272,7 @@
     "else:\n",
     "    xrange = (0, 20)\n",
     "    bins = 100\n",
-    "    bin_range = [-50, 50] "
+    "    bin_range = [-50, 50]"
    ]
   },
   {
@@ -396,17 +287,11 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:56:20.686534Z",
-     "start_time": "2018-12-06T10:56:11.721829Z"
-    },
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "#************** HISTOGRAMS *******************#\n",
-    "\n",
+    "step_timer.start()\n",
     "fig = plt.figure(figsize=(12,4))\n",
     "ax = fig.add_subplot(121)\n",
     "xana.histPlot(ax, offsetMap.flatten(), bins=2000, plot_errors=False)\n",
@@ -425,15 +310,16 @@
     "\n",
     "#************** HEAT MAPS *******************#\n",
     "\n",
-    "fig = xana.heatmapPlot(offsetMap[:,:,0], x_label='Column Number', y_label='Row Number',  aspect=1,\n",
+    "fig = xana.heatmapPlot(offsetMap, x_label='Column Number', y_label='Row Number', aspect=1,\n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=6000, vmax=15000, \n",
     "                       lut_label='Offset (ADU)', panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',\n",
     "                       title = 'Offset Map')\n",
     "\n",
-    "fig = xana.heatmapPlot(noiseMap[:,:,0], x_label='Column Number', y_label='Row Number', aspect=1,\n",
+    "fig = xana.heatmapPlot(noiseMap, x_label='Column Number', y_label='Row Number', aspect=1,\n",
     "                       lut_label='Uncorrected Noise (ADU)', x_range=(0, pixels_y),\n",
     "                       y_range=(0, pixels_x), vmax=2*np.mean(noiseMap), panel_x_label='Row Stat (ADU)', \n",
-    "                       panel_y_label='Column Stat (ADU)', title = 'Uncorrected NoiseMap')"
+    "                       panel_y_label='Column Stat (ADU)', title = 'Uncorrected NoiseMap')\n",
+    "step_timer.done_step(f'Offset and Noise Maps before Common Mode Correction.')"
    ]
   },
   {
@@ -469,7 +355,7 @@
     "bad_pixels[:,0] = EDGE_PIXELS \n",
     "bad_pixels[:,1023] = EDGE_PIXELS  \n",
     "\n",
-    "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
+    "fig = xana.heatmapPlot(np.log2(bad_pixels),\n",
     "                       x_label='Columns', y_label='Rows', lut_label='Bad Pixel Value (ADU)',\n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0, vmax=32,\n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
@@ -482,11 +368,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Offset Correction:\n",
-    "\n",
-    "offsetCorrection = xcal.OffsetCorrection(sensorSize, offsetMap, nCells = memoryCells, cores=cpuCores, gains=None,\n",
-    "                                         runParallel=run_parallel, blockSize=blockSize)\n",
-    "\n",
     "# Common Mode Correction:\n",
     "# In this method, the median of all (assuming stride = 1) pixels that are read out at the same time along a column \n",
     "# is subtracted from the signal in each pixel in that column. Here, the signals in the pixels refer to the value in \n",
@@ -495,12 +376,15 @@
     "# in a column per quadrant is smaller than the value set for minFrac parameter for a particular column, that column \n",
     "# will be ignored for calculation of common mode values and that column is not corrected for common mode.\n",
     "# minFrac = 0 means no column is ignored except those containing nan values (bad pixels):\n",
-    "cmCorrection = xcal.CommonModeCorrection(sensorSize,\n",
-    "                                         commonModeBlockSize,\n",
-    "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=1,\n",
-    "                                         noiseMap=noiseMap.astype(np.float32), minFrac=0)\n",
-    "\n",
-    "cmCorrection.debug()"
+    "cmCorrection = xcal.CommonModeCorrection(\n",
+    "    sensorSize,\n",
+    "    commonModeBlockSize,\n",
+    "    commonModeAxis,\n",
+    "    parallel=False,\n",
+    "    dType=np.float32, stride=1,\n",
+    "    noiseMap=noiseMap[..., np.newaxis],  # np.float32\n",
+    "    minFrac=0,\n",
+    ")"
    ]
   },
   {
@@ -514,11 +398,11 @@
     "# negative bin to ensure the offset and common mode corrections actually move the signal to zero:\n",
     "\n",
     "# For offset corrected data:\n",
-    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=bins, range=bin_range, memoryCells=memoryCells,\n",
-    "                                            cores=cpuCores, gains=None, blockSize=blockSize)\n",
+    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=bins, range=bin_range, memoryCells=1,\n",
+    "                                            parallel=False, gains=None, blockSize=blockSize)\n",
     "# For common mode corrected data:\n",
-    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=bins, range=bin_range, memoryCells=memoryCells,\n",
-    "                                              cores=cpuCores, gains=None, blockSize=blockSize)"
+    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=bins, range=bin_range, memoryCells=1,\n",
+    "                                              parallel=False, gains=None, blockSize=blockSize)"
    ]
   },
   {
@@ -533,41 +417,33 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "tags": []
+   },
    "outputs": [],
    "source": [
-    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
-    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
-    "chunkSize_new = 0 # See below\n",
-    "\n",
-    "for data in reader.readChunks():\n",
-    "    data = data.astype(np.float32)\n",
-    "    dx = np.count_nonzero(data, axis=(0, 1))\n",
-    "    data = data[:,:,dx != 0]\n",
-    "    \n",
-    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will \n",
-    "    # temporarily change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
-    "    if data.shape[2] < chunkSize:\n",
-    "        chunkSize_new = data.shape[2]\n",
-    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
-    "              .format(chunkSize_new))\n",
-    "        images = images + chunkSize_new\n",
-    "        counter2 += 1 \n",
-    "    else:\n",
-    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
-    "        counter1 += 1\n",
-    "        \n",
-    "    data -= offsetMap.data # Offset correction\n",
-    "    offset_corr_data = copy.copy(data) # I am copying this so that I can have access to it in the table below\n",
-    "    histCalCorrected.fill(data)\n",
-    "    cellTable=np.zeros(data.shape[2], np.int32) # Common mode correction\n",
-    "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable) \n",
-    "    histCalCMCorrected.fill(data)\n",
-    "    noiseCal.fill(data)  # Filling calculators with data\n",
-    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
-    "\n",
-    "print('A total number of {} images are processed.'.format(images))\n",
-    "print(\"Offset and common mode corrections are applied.\")"
+    "step_timer.start()\n",
+    "def correct_image(wid, idx, d):\n",
+    "    d -= offsetMap\n",
+    "    offset_corr_data[idx, ...] = d\n",
+    "\n",
+    "    d = np.squeeze(cmCorrection.correct(\n",
+    "        d[..., np.newaxis],\n",
+    "        cellTable=np.zeros((1,), np.int32)\n",
+    "    ))\n",
+    "    corr_data[idx, ...] = d\n",
+    "\n",
+    "context = psh.context.ThreadContext(num_workers=10)\n",
+    "\n",
+    "data_shape = (n_trains, sensorSize[0], sensorSize[1])\n",
+    "\n",
+    "corr_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
+    "offset_corr_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
+    "\n",
+    "context.map(correct_image, data.copy())\n",
+    "histCalCorrected.fill(offset_corr_data)\n",
+    "histCalCMCorrected.fill(corr_data)\n",
+    "step_timer.done_step(f'Offset and common mode corrections are applied.')"
    ]
   },
   {
@@ -576,7 +452,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "noiseMapCM = noiseCal.get() # Producing common mode corrected noise map\n",
+    "noiseMapCM = np.std(corr_data, axis=0, dtype=np.float64) # Producing common mode corrected noise map\n",
     "ho, eo, co, so = histCalCorrected.get()\n",
     "hCM, eCM, cCM, sCM = histCalCMCorrected.get()"
    ]
@@ -588,10 +464,10 @@
    "outputs": [],
    "source": [
     "# We are copying these so that we can replot them later after the calculators are reset:\n",
-    "ho_second_trial = copy.copy(ho)\n",
-    "co_second_trial = copy.copy(co)\n",
-    "hCM_second_trial = copy.copy(hCM)\n",
-    "cCM_second_trial = copy.copy(cCM)"
+    "ho_second_trial = ho.copy()\n",
+    "co_second_trial = co.copy()\n",
+    "hCM_second_trial = hCM.copy()\n",
+    "cCM_second_trial = cCM.copy()"
    ]
   },
   {
@@ -609,6 +485,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "do = [{'x': co,\n",
     "     'y': ho,\n",
     "     'drawstyle': 'steps-post',\n",
@@ -630,13 +507,14 @@
     "t0.title = \"Comparison of the First Round of Corrections - Bad Pixels Not Excluded\"\n",
     "t0.field_names = [\"Dark Pedestal After Offset Correction\", \"Dark Pedestal After Offset and Common Mode Corrections\"]\n",
     "t0.add_row([\"Mean: {:0.3f} ADU\".format(np.mean(offset_corr_data)), \"Mean: {:0.3f} ADU\"\n",
-    "            .format(np.mean(data))])\n",
+    "            .format(np.mean(corr_data))])\n",
     "t0.add_row([\"Median: {:0.3f} ADU\".format(np.median(offset_corr_data)), \"Median: {:0.3f} ADU\"\n",
-    "            .format(np.median(data))])\n",
+    "            .format(np.median(corr_data))])\n",
     "t0.add_row([\"Standard Deviation: {:0.3f} ADU\".format(np.std(offset_corr_data)), \n",
     "            \"Standard Deviation: {:0.3f} ADU\"\n",
-    "            .format(np.std(data))])\n",
-    "print(t0,'\\n')"
+    "            .format(np.std(corr_data))])\n",
+    "print(t0,'\\n')\n",
+    "step_timer.done_step(\"Plotting time\")"
    ]
   },
   {
@@ -655,6 +533,7 @@
    "outputs": [],
    "source": [
     "#***** HISTOGRAM OF CORRECTED NOISE MAP *******#\n",
+    "step_timer.start()\n",
     "fig = plt.figure(figsize=(12,4))\n",
     "ax = fig.add_subplot(122)\n",
     "xana.histPlot(ax, noiseMapCM.flatten(), bins=2000, plot_errors=False)\n",
@@ -663,13 +542,14 @@
     "t = ax.set_title(\"Histogram of the Noise Map After Offset and \\n Common Mode Corrections\")\n",
     "t = ax.set_xlim(xrange)\n",
     "\n",
-    "fig = xana.heatmapPlot(noiseMapCM[:, :, 0],\n",
+    "fig = xana.heatmapPlot(noiseMapCM,\n",
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Corrected Noise (ADU)',\n",
     "                       x_range=(0, pixels_y),\n",
     "                       y_range=(0, pixels_x), vmax=2*np.mean(noiseMap), panel_x_label='Row Stat (ADU)', \n",
     "                       panel_y_label='Column Stat (ADU)', \n",
-    "                       title='Noise Map After Offset and Common Mode Corrections')"
+    "                       title='Noise Map After Offset and Common Mode Corrections')\n",
+    "step_timer.done_step(\"Plotting time\")"
    ]
   },
   {
@@ -679,7 +559,6 @@
    "outputs": [],
    "source": [
     "# Resetting the calculators:\n",
-    "noiseCal.reset() # resetting noise calculator\n",
     "histCalCorrected.reset() # resetting histogram calculator\n",
     "histCalCMCorrected.reset() # resetting histogram calculator"
    ]
@@ -699,15 +578,25 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "mnoffset = np.nanmedian(offsetMap)\n",
-    "stdoffset = np.nanstd(offsetMap)\n",
+    "stdoffset = np.nanstd(offsetMap, dtype=np.float64)\n",
     "bad_pixels[(offsetMap < mnoffset-bad_pixel_offset_sigma*stdoffset) |\n",
     "           (offsetMap > mnoffset+bad_pixel_offset_sigma*stdoffset)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
     "\n",
     "mnnoise = np.nanmedian(noiseMapCM)\n",
-    "stdnoise = np.nanstd(noiseMapCM)\n",
+    "stdnoise = np.nanstd(noiseMapCM, dtype=np.float64)\n",
     "bad_pixels[(noiseMapCM < mnnoise-bad_pixel_noise_sigma*stdnoise) |\n",
-    "           (noiseMapCM > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value"
+    "           (noiseMapCM > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
+    "\n",
+    "# Each pnCCD sensor has 22 rows and 60 columns cut in the middle of it out by a laser:\n",
+    "\n",
+    "hole_mask = np.zeros(bad_pixels.shape, np.uint32) \n",
+    "hole_mask[489:533, 481:541] = BadPixels.NON_SENSITIVE.value\n",
+    "\n",
+    "# Assigning this masked area as bad pixels:\n",
+    "bad_pixels = np.bitwise_or(bad_pixels, hole_mask)\n",
+    "step_timer.done_step(\"Calculating BadPixelMap.\")"
    ]
   },
   {
@@ -720,25 +609,17 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "# Each pnCCD sensor has 22 rows and 60 columns cut in the middle of it out by a laser:\n",
-    "\n",
-    "hole_mask = np.zeros(bad_pixels.shape, np.uint32) \n",
-    "hole_mask[489:533,481:541,:] = BadPixels.NON_SENSITIVE.value\n",
-    "\n",
-    "# Assigning this masked area as bad pixels:\n",
-    "bad_pixels = np.bitwise_or(bad_pixels, hole_mask)\n",
-    "\n",
-    "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
+    "step_timer.start()\n",
+    "fig = xana.heatmapPlot(np.log2(bad_pixels),\n",
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Bad Pixel Value (ADU)',\n",
     "                       x_range=(0, pixels_y),\n",
     "                       y_range=(0, pixels_x), vmin=0, vmax=32, panel_x_label='Row Stat (ADU)', \n",
-    "                       panel_y_label='Column Stat (ADU)', title = 'Second Bad Pixels Map with the Hole in Center')"
+    "                       panel_y_label='Column Stat (ADU)', title = 'Second Bad Pixels Map with the Hole in Center')\n",
+    "step_timer.done_step(\"Plotting time\")"
    ]
   },
   {
@@ -756,8 +637,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "event_threshold = sigmaNoise*np.median(noiseMapCM) # for exclusion of possible cosmic ray events\n",
-    "noiseCal.setBadPixelMask(bad_pixels != 0) # setting bad pixels map for the noise calculator"
+    "event_threshold = sigmaNoise * np.median(noiseMapCM)  # for exclusion of possible cosmic ray events"
    ]
   },
   {
@@ -766,11 +646,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "cmCorrection = xcal.CommonModeCorrection(sensorSize,\n",
-    "                                         commonModeBlockSize,\n",
-    "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=1,\n",
-    "                                         noiseMap=noiseMapCM.astype(np.float32), minFrac=0)\n",
-    "cmCorrection.debug()"
+    "cmCorrection = xcal.CommonModeCorrection(\n",
+    "    sensorSize,\n",
+    "    commonModeBlockSize,\n",
+    "    commonModeAxis,\n",
+    "    parallel=False,\n",
+    "    dType=np.float32, stride=1,\n",
+    "    noiseMap=noiseMapCM[..., np.newaxis], minFrac=0,\n",
+    ")"
    ]
   },
   {
@@ -779,46 +662,33 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
-    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
-    "chunkSize_new = 0 # See below\n",
-    "\n",
-    "for data in reader.readChunks():\n",
-    "    data = data.astype(np.float32)\n",
-    "    dx = np.count_nonzero(data, axis=(0, 1))\n",
-    "    data = data[:,:,dx != 0]\n",
-    "    data_mask = np.repeat(bad_pixels, data.shape[2], axis=2) # Converting bad_pixels to the same shape as the data\n",
-    "    data[data_mask != 0] = np.nan # masking data for bad pixels and equating the values to np.nan\n",
-    "    \n",
-    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will \n",
-    "    # temporarily change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
-    "    if data.shape[2] < chunkSize:\n",
-    "        chunkSize_new = data.shape[2]\n",
-    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
-    "              .format(chunkSize_new))\n",
-    "        images = images + chunkSize_new\n",
-    "        counter2 += 1 \n",
-    "    else:\n",
-    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
-    "        counter1 += 1\n",
-    "    \n",
-    "    data_copy = offsetCorrection.correct(copy.copy(data))\n",
-    "    cellTable=np.zeros(data_copy.shape[2], np.int32)\n",
-    "    data_copy = cmCorrection.correct(data_copy.astype(np.float32), cellTable=cellTable)\n",
-    "    data[data_copy > event_threshold] = np.nan # discarding events caused by cosmic rays\n",
-    "    #data = np.ma.MaskedArray(data, np.isnan(data), fill_value=np.nan) # masking cosmics,default fill_value = 1e+20\n",
+    "step_timer.start()\n",
+    "def correct_image(wid, idx, d):\n",
     "    \n",
-    "    data -= offsetMap.data # Offset correction\n",
-    "    offset_corr_data2 = copy.copy(data) # I am copying this so that I can have access to it in the table below\n",
-    "    histCalCorrected.fill(data)\n",
-    "    cellTable=np.zeros(data.shape[2], np.int32) # Common mode correction\n",
-    "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable) \n",
-    "    histCalCMCorrected.fill(data)\n",
-    "    noiseCal.fill(data)  # Filling calculators with data\n",
-    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
-    "\n",
-    "print(\"Final iteration is Performed.\")\n",
-    "print('A total number of {} images are processed.'.format(images))"
+    "    d[bad_pixels != 0] = np.nan # masking data for bad pixels and equating the values to np.nan\n",
+    "    d_off = d - offsetMap\n",
+    "    d_off_cm = np.squeeze(cmCorrection.correct(\n",
+    "        d_off[..., np.newaxis], cellTable=np.zeros((1,), dtype=np.float32)))\n",
+    "\n",
+    "    # discarding events caused by cosmic rays\n",
+    "    d_off[d_off_cm > event_threshold] = np.nan\n",
+    "    d_off_cm[d_off_cm > event_threshold] = np.nan  \n",
+    "\n",
+    "    offset_corr_data[idx, ...] = d_off  # Will be used for plotting\n",
+    "    corr_data[idx, ...] = np.squeeze(d_off_cm)\n",
+    "\n",
+    "context = psh.context.ThreadContext(num_workers=10)\n",
+    "\n",
+    "corr_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
+    "offset_corr_data = context.alloc(shape=data_shape, dtype=np.float32)\n",
+    "\n",
+    "context.map(correct_image, data.copy())\n",
+    "\n",
+    "histCalCorrected.fill(offset_corr_data)\n",
+    "histCalCMCorrected.fill(corr_data)\n",
+    "\n",
+    "print(f\"A total number of {n_trains} images are processed.\")\n",
+    "step_timer.done_step(\"Final iteration is Performed.\")"
    ]
   },
   {
@@ -827,26 +697,18 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "noiseMapCM_2nd = noiseCal.get().filled(np.nan) # the masked pixels are filled with nans\n",
+    "noiseMapCM_2nd = np.nanstd(corr_data, axis=0, dtype=np.float64)\n",
     "ho2, eo2, co2, so2 = histCalCorrected.get()\n",
     "hCM2, eCM2, cCM2, sCM2 = histCalCMCorrected.get()"
    ]
   },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Plots of the Final Results\n",
-    "\n",
-    "The following plot and table compare the offset and common mode corrected signal with and without the bad pixels."
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "do_Final = [{'x': co_second_trial,\n",
     "     'y': ho_second_trial,\n",
     "     'drawstyle': 'steps-post',\n",
@@ -858,7 +720,7 @@
     "     'drawstyle': 'steps-post',\n",
     "     'color': 'red',\n",
     "     'ecolor': 'crimson',\n",
-    "     'label': 'Offset and Common Mode Corrected Signal (BP Incl.)' \n",
+    "     'label': 'Offset and Common Mode Corrected Signal (BP Incl.)'\n",
     "     },\n",
     "    {'x': co2,\n",
     "     'y': ho2,\n",
@@ -873,21 +735,22 @@
     "     'label': 'Offset and Common Mode Corrected Signal (BP Excl.)'\n",
     "     }]\n",
     "\n",
-    "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'ADU', \n",
-    "                      y_label=\"Counts (logarithmic scale)\", y_log=True, x_range = bin_range, \n",
+    "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'ADU',\n",
+    "                      y_label=\"Counts (logarithmic scale)\", y_log=True, x_range = bin_range,\n",
     "                      y_range = (0.02, 1e8),\n",
     "                      legend='top-right-frame-1col', title = 'Comparison of Corrections')\n",
     "\n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Comparison of the Second Round of Corrections - Bad Pixels Excluded\"\n",
     "t0.field_names = [\"Dark Pedestal After Offset Correction\", \"Dark Pedestal After Offset and Common Mode Corrections\"]\n",
-    "t0.add_row([\"Mean: {:0.3f} ADU\".format(np.nanmean(offset_corr_data2)), \"Mean: {:0.3f} ADU\"\n",
-    "            .format(np.nanmean(data))])\n",
-    "t0.add_row([\"Median: {:0.3f} ADU\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} ADU\"\n",
-    "            .format(np.nanmedian(data))])\n",
-    "t0.add_row([\"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(offset_corr_data2)), \n",
-    "            \"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(data))])\n",
-    "print(t0,'\\n')"
+    "t0.add_row([\"Mean: {:0.3f} ADU\".format(np.nanmean(offset_corr_data)), \"Mean: {:0.3f} ADU\"\n",
+    "            .format(np.nanmean(corr_data))])\n",
+    "t0.add_row([\"Median: {:0.3f} ADU\".format(np.nanmedian(offset_corr_data)), \"Median: {:0.3f} ADU\"\n",
+    "            .format(np.nanmedian(corr_data))])\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(offset_corr_data)),\n",
+    "            \"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(corr_data))])\n",
+    "print(t0,'\\n')\n",
+    "step_timer.done_step(\"Plotting time\")"
    ]
   },
   {
@@ -906,7 +769,8 @@
    "outputs": [],
    "source": [
     "#***** HISTOGRAMS OF NOISE MAPS *******#\n",
-    "fig = plt.figure(figsize=(12,4))\n",
+    "step_timer.start()\n",
+    "fig = plt.figure(figsize=(12, 4))\n",
     "ax = fig.add_subplot(121)\n",
     "xana.histPlot(ax, noiseMapCM_2nd.flatten(), bins=2000, plot_errors=False)\n",
     "t = ax.set_xlabel(\"ADU per 2000 bins\")\n",
@@ -914,11 +778,12 @@
     "t = ax.set_title(\"Histogram of the Noise Map After Offset and Common Mode \\n Corrections and exclusion of Bad Pixels\")\n",
     "t = ax.set_xlim(xrange)\n",
     "\n",
-    "fig = xana.heatmapPlot(noiseMapCM_2nd[:,:,0], aspect=1, x_label='Column Number', y_label='Row Number',\n",
+    "fig = xana.heatmapPlot(noiseMapCM_2nd, aspect=1, x_label='Column Number', y_label='Row Number',\n",
     "                       lut_label='Final Corrected Noise (ADU)', x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
     "                       vmax=2*np.mean(noiseMap),\n",
     "                       title = 'Final Offset and Common Mode Corrected Noise Map \\n (Bad Pixels Excluded)', \n",
-    "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)')"
+    "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)')\n",
+    "step_timer.done_step(\"Plotting time\")"
    ]
   },
   {
@@ -933,11 +798,10 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "mnoffset = np.nanmedian(offsetMap)\n",
     "stdoffset = np.nanstd(offsetMap)\n",
     "bad_pixels[(offsetMap < mnoffset-bad_pixel_offset_sigma*stdoffset) |\n",
@@ -950,12 +814,13 @@
     "\n",
     "bad_pixels = np.bitwise_or(bad_pixels, hole_mask)\n",
     "\n",
-    "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
+    "fig = xana.heatmapPlot(np.log2(bad_pixels),\n",
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Bad Pixel Value (ADU)',\n",
     "                       x_range=(0, pixels_y),\n",
     "                       y_range=(0, pixels_x), vmin=0, vmax=32, panel_x_label='Row Stat (ADU)', \n",
-    "                       panel_y_label='Column Stat (ADU)', title = 'Final Bad Pixels Map')"
+    "                       panel_y_label='Column Stat (ADU)', title = 'Final Bad Pixels Map')\n",
+    "step_timer.done_step(\"Plotting time\")"
    ]
   },
   {
@@ -983,47 +848,39 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:56:22.741284Z",
-     "start_time": "2018-12-06T10:56:20.688393Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
+    "step_timer.start()\n",
     "constant_maps = {\n",
-    "    'Offset': offsetMap,\n",
-    "    'Noise': noiseMapCM_2nd,\n",
-    "    'BadPixelsDark': bad_pixels\n",
+    "    'Offset': offsetMap[..., np.newaxis],\n",
+    "    'Noise': noiseMapCM_2nd[..., np.newaxis],\n",
+    "    'BadPixelsDark': bad_pixels[..., np.newaxis],\n",
     "}\n",
     "md = None\n",
+    "\n",
+    "det = Constants.CCD(DetectorTypes.pnCCD)\n",
+    "\n",
+    "# set the operating condition\n",
+    "condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
+    "                                integration_time=integration_time,\n",
+    "                                gain_setting=gain,\n",
+    "                                temperature=fix_temperature_top,\n",
+    "                                pixels_x=pixels_x,\n",
+    "                                pixels_y=pixels_y)\n",
+    "for parm in condition.parameters:\n",
+    "    if parm.name == \"Sensor Temperature\":\n",
+    "        parm.lower_deviation = temp_limits\n",
+    "        parm.upper_deviation = temp_limits\n",
+    "\n",
+    "db_module = get_pdu_from_db(karabo_id, karabo_da, getattr(det, 'Offset')(),\n",
+    "                            condition, cal_db_interface,\n",
+    "                            snapshot_at=creation_time)[0]\n",
+    "\n",
     "for const_name in constant_maps.keys():\n",
-    "    det = Constants.CCD(DetectorTypes.pnCCD)\n",
     "    const = getattr(det, const_name)()\n",
     "    const.data = constant_maps[const_name].data\n",
     "\n",
-    "    # set the operating condition\n",
-    "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                    integration_time=integration_time,\n",
-    "                                    gain_setting=gain,\n",
-    "                                    temperature=fix_temperature_top,\n",
-    "                                    pixels_x=pixels_x,\n",
-    "                                    pixels_y=pixels_y)\n",
-    "\n",
-    "    for parm in condition.parameters:\n",
-    "        if parm.name == \"Sensor Temperature\":\n",
-    "            parm.lower_deviation = temp_limits\n",
-    "            parm.upper_deviation = temp_limits\n",
-    "\n",
-    "    # This should be used in case of running notebook \n",
-    "    # by a different method other than myMDC which already\n",
-    "    # sends CalCat info.\n",
-    "    # TODO: Set db_module to \"\" by default in the first cell\n",
-    "    if not db_module:\n",
-    "        db_module = get_pdu_from_db(karabo_id, karabo_da, const,\n",
-    "                                    condition, cal_db_interface,\n",
-    "                                    snapshot_at=creation_time)[0]\n",
-    "\n",
     "    if db_output:\n",
     "        md = send_to_db(db_module, karabo_id, const, condition,\n",
     "                        file_loc=file_loc, report_path=report,\n",
@@ -1039,7 +896,8 @@
     "print(\"Constants parameter conditions are:\\n\")\n",
     "print(f\"• bias_voltage: {bias_voltage}\\n• gain_setting: {gain}\\n\"\n",
     "      f\"• top_temperature: {fix_temperature_top}\\n• integration_time: {integration_time}\\n\"\n",
-    "      f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")"
+    "      f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")\n",
+    "step_timer.done_step(\"Storing calibration constants.\")"
    ]
   },
   {
@@ -1047,7 +905,10 @@
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "print(f\"Total processing time {step_timer.timespan():.01f} s\")\n",
+    "step_timer.print_summary()"
+   ]
   }
  ],
  "metadata": {
@@ -1066,7 +927,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.7"
+   "version": "3.8.11"
   },
   "latex_envs": {
    "LaTeX_envs_menu_present": true,
@@ -1087,5 +948,5 @@
   }
  },
  "nbformat": 4,
- "nbformat_minor": 1
+ "nbformat_minor": 4
 }
diff --git a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
index db8da2b619ff4062322f6782a48894ef4d124f4c..54f5d78c1e7968212de1414d5340b37a4d7cd176 100644
--- a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
+++ b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# pnCCD Data Correction #\n",
     "\n",
-    "Authors: DET Group, Modified by Kiana Setoodehnia on December 2020 - Version 4.0\n",
+    "Authors: DET Group, Modified by Kiana Setoodehnia - Version 5.0\n",
     "\n",
     "The following notebook provides offset, common mode, relative gain, split events and pattern classification corrections of images acquired with the pnCCD. This notebook *does not* yet correct for charge transfer inefficiency."
    ]
@@ -14,48 +14,33 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:23.218849Z",
-     "start_time": "2018-12-06T15:54:23.166497Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\" # input folder\n",
-    "out_folder = '/gpfs/exfel/data/scratch/setoodeh/Test' # output folder\n",
-    "run = 347 # which run to read data from\n",
-    "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
+    "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\"  # input folder\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/remove\"  # output folder\n",
+    "run = 347  # which run to read data from\n",
+    "sequences = [0]  # sequences to correct, set to -1 for all, range allowed\n",
+    "sequences_per_node = 1  # number of sequences running on the same slurm node.\n",
     "\n",
-    "db_module = \"pnCCD_M205_M206\"\n",
     "karabo_da = 'PNCCD01' # data aggregators\n",
-    "karabo_da_control = \"PNCCD02\" # file inset for control data\n",
     "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n",
     "receiver_id = \"PNCCD_FMT-0\" # inset for receiver devices\n",
-    "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data\n",
-    "path_template_seqs = \"{}/r{:04d}/*PNCCD01-S*.h5\"\n",
-    "h5path = '/INSTRUMENT/{}/CAL/{}:output/data/' # path to data in the HDF5 file \n",
-    "h5path_ctrl = '/CONTROL/{}/CTRL/TCTRL'\n",
-    "\n",
-    "overwrite = True # keep this as True to not overwrite the output \n",
-    "use_dir_creation_date = True # To obtain creation time of the run\n",
-    "number_dark_frames = 0 # number of images to be used, if set to 0 all available images are used\n",
-    "chunk_size_idim = 1 # H5 chunking size of output data\n",
-    "cluster_profile = \"noDB\" # ipcluster profile to use\n",
-    "\n",
-    "cpuCores = 40\n",
+    "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5'  # the template to use to access data\n",
+    "instrument_source_template = '{}/CAL/{}:output'  # template for data source name, will be filled with karabo_id and receiver_id.\n",
+    "\n",
+    "# Parameters affecting data correction.\n",
+    "commonModeAxis = 0 # axis along which common mode will be calculated, 0 = row, and 1 = column\n",
     "commonModeBlockSize = [512, 512] # size of the detector in pixels for common mode calculations\n",
-    "commonModeAxis = 0 # axis along which common mode will be calculated, 0 = row, and 1 = column \n",
     "split_evt_primary_threshold = 4. # primary threshold for split event classification in terms of n sigma noise\n",
     "split_evt_secondary_threshold = 3. # secondary threshold for split event classification in terms of n sigma noise\n",
-    "sequences_per_node = 1\n",
-    "limit_images = 0\n",
-    "seq_num = 0  # sequence number for which the last plot at the end of the notebook is plotted\n",
+    "saturated_threshold = 32000.  # full well capacity in ADU\n",
     "\n",
-    "# pnCCD parameters:\n",
+    "# Conditions for retrieving calibration constants\n",
+    "set_parameters_manually = False  # Boolean for setting parameter values manually instead of reading control values.\n",
     "fix_temperature_top = 0. # fix temperature for top sensor in K, set to 0. to use value from slow data.\n",
     "fix_temperature_bot = 0. # fix temperature for bottom senspr in K, set to 0. to use value from slow data.\n",
-    "gain = 0.1 # the detector's gain setting, It is later read from file and this value is overwritten\n",
+    "gain = -1  # the detector's gain setting. Set to -1 to use the value from the slow data.\n",
     "bias_voltage = 0. # the detector's bias voltage. set to 0. to use value from slow data.\n",
     "integration_time = 70  # detector's integration time\n",
     "photon_energy = 1.6 # Al fluorescence in keV\n",
@@ -63,14 +48,27 @@
     "cal_db_interface = \"tcp://max-exfl016:8015\" # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00\n",
+    "use_dir_creation_date = True  # To obtain creation time of the run\n",
     "\n",
+    "# Booleans for selecting corrections to apply.\n",
     "only_offset = False # Only, apply offset.\n",
     "common_mode = True # Apply common mode correction\n",
     "relgain = True # Apply relative gain correction\n",
-    "cti = False # Apply charge transfer inefficiency correction (not implemented, yet)\n",
-    "do_pattern_classification = True # classify split events\n",
+    "pattern_classification = True  # classify split events\n",
+    "\n",
+    "# parameters affecting stored output data.\n",
+    "chunk_size_idim = 1  # H5 chunking size of output data\n",
+    "overwrite = True  # keep this as True to not overwrite the output \n",
+    "# ONLY FOR TESTING\n",
+    "limit_images = 0  # this parameter is used for limiting number of images to correct from a sequence file. ONLY FOR TESTING.\n",
+    "\n",
+    "# TODO: REMOVE\n",
+    "db_module = \"\"\n",
     "\n",
-    "saturated_threshold = 32000. # full well capacity in ADU"
+    "\n",
+    "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n",
+    "    from xfel_calibrate.calibrate import balance_sequences as bs\n",
+    "    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)"
    ]
   },
   {
@@ -87,66 +85,46 @@
     "# Dont apply any corrections if only_offset is requested.\n",
     "if not only_offset:\n",
     "    corr_bools[\"relgain\"] = relgain\n",
-    "    corr_bools[\"cti\"] = cti\n",
     "    corr_bools[\"common_mode\"] = common_mode\n",
-    "    corr_bools[\"pattern_class\"] = do_pattern_classification"
+    "    corr_bools[\"pattern_class\"] = pattern_classification"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:23.455376Z",
-     "start_time": "2018-12-06T15:54:23.413579Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "import copy\n",
     "import datetime\n",
-    "import glob\n",
     "import os\n",
-    "import time\n",
-    "import traceback\n",
     "import warnings\n",
-    "from datetime import timedelta\n",
+    "from pathlib import Path\n",
     "from typing import Tuple\n",
-    "\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
     "import h5py\n",
     "import matplotlib.pyplot as plt\n",
-    "from iminuit import Minuit\n",
+    "import numpy as np\n",
+    "import pasha as psh\n",
     "from IPython.display import Markdown, display\n",
+    "from extra_data import H5File, RunDirectory\n",
+    "from prettytable import PrettyTable\n",
     "\n",
     "%matplotlib inline\n",
-    "import numpy as np\n",
-    "import XFELDetAna.xfelprofiler as xprof\n",
-    "from cal_tools.pnccdlib import VALID_GAINS, extract_slow_data\n",
+    "\n",
+    "from XFELDetAna import xfelpyanatools as xana\n",
+    "from XFELDetAna import xfelpycaltools as xcal\n",
+    "from cal_tools import pnccdlib\n",
     "from cal_tools.tools import (\n",
     "    get_constant_from_db_and_time,\n",
     "    get_dir_creation_date,\n",
     "    get_random_db_interface,\n",
+    "    map_modules_from_folder,\n",
     ")\n",
-    "from iCalibrationDB import Conditions, ConstantMetaData, Constants, Detectors, Versions\n",
-    "from iCalibrationDB.detectors import DetectorTypes\n",
-    "from prettytable import PrettyTable\n",
-    "\n",
-    "profiler = xprof.Profiler()\n",
-    "profiler.disable()\n",
-    "from XFELDetAna.util import env\n",
-    "\n",
-    "env.iprofile = cluster_profile\n",
-    "from XFELDetAna import xfelpyanatools as xana\n",
-    "from XFELDetAna import xfelpycaltools as xcal\n",
-    "from XFELDetAna.plotting.util import prettyPlotting\n",
-    "\n",
-    "prettyPlotting=True\n",
-    "\n",
-    "\n",
-    "if sequences[0] == -1:\n",
-    "    sequences = None"
+    "from cal_tools.step_timing import StepTimer\n",
+    "from cal_tools import h5_copy_except\n",
+    "from iCalibrationDB import Conditions, ConstantMetaData, Constants\n",
+    "from iCalibrationDB.detectors import DetectorTypes"
    ]
   },
   {
@@ -156,23 +134,21 @@
    "outputs": [],
    "source": [
     "# Calibration Database Settings, and Some Initial Run Parameters & Paths:\n",
-    "\n",
     "display(Markdown('### Initial Settings and Paths'))\n",
-    "pixels_x = 1024 # rows of pnCCD in pixels \n",
-    "pixels_y = 1024 # columns of pnCCD in pixels\n",
+    "\n",
+    "# Sensor size and block size definitions (important for common mode and other corrections):\n",
+    "pixels_x = 1024  # rows of pnCCD in pixels \n",
+    "pixels_y = 1024  # columns of pnCCD in pixels\n",
+    "sensorSize = [pixels_x, pixels_y]\n",
+    "# For xcal.HistogramCalculators.\n",
+    "blockSize = [pixels_x//2, pixels_y//2]  # sensor area will be analysed according to blockSize.\n",
+    "\n",
     "print(f\"pnCCD size is: {pixels_x}x{pixels_y} pixels.\")\n",
     "print(f'Calibration database interface selected: {cal_db_interface}')\n",
     "\n",
-    "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
-    "file_loc =f'Proposal: {proposal}, Run: {run}'\n",
-    "print(f'Proposal: {proposal}, Run: {run}')\n",
-    "\n",
     "# Paths to the data:\n",
-    "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
-    "fp_name = path_template.format(run, karabo_da)\n",
-    "fp_path = '{}/{}'.format(ped_dir, fp_name)\n",
-    "h5path = h5path.format(karabo_id, receiver_id)\n",
-    "print(\"HDF5 path to data: {}\\n\".format(h5path))\n",
+    "instrument_src = instrument_source_template.format(karabo_id, receiver_id)\n",
+    "print(f\"Instrument H5File source: {instrument_src}\\n\")\n",
     "\n",
     "# Run's creation time:\n",
     "if creation_time:\n",
@@ -189,7 +165,6 @@
     "if not creation_time and use_dir_creation_date:\n",
     "    creation_time = get_dir_creation_date(in_folder, run)\n",
     "\n",
-    "\n",
     "print(f\"Creation time: {creation_time}\")"
    ]
   },
@@ -199,26 +174,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Reading all sequences of the run:\n",
-    "file_list = []\n",
-    "total_sequences = 0\n",
-    "fsequences = []\n",
-    "\n",
-    "if sequences is None:\n",
-    "    file_list = glob.glob(fp_path.format(0).replace('00000', '*'))\n",
-    "    file_list = sorted(file_list, key = lambda x: (len (x), x))\n",
-    "    total_sequences = len(file_list)\n",
-    "    fsequences = range(total_sequences)\n",
-    "else:\n",
-    "    for seq in sequences:\n",
-    "        abs_entry = fp_path.format(seq)\n",
-    "        if os.path.isfile(abs_entry):\n",
-    "            file_list.append(abs_entry)\n",
-    "            total_sequences += 1\n",
-    "            fsequences.append(seq)\n",
-    "\n",
-    "sequences = fsequences    \n",
-    "print(f\"This run has a total number of {total_sequences} sequences.\\n\")"
+    "step_timer = StepTimer()"
    ]
   },
   {
@@ -227,17 +183,34 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# extract slow data\n",
-    "if karabo_da_control:\n",
-    "    ctrl_fname = os.path.join(ped_dir, path_template.format(run, karabo_da_control)).format(sequences[0])\n",
-    "    ctrl_path = h5path_ctrl.format(karabo_id)\n",
-    "    mdl_ctrl_path = f\"/CONTROL/{karabo_id}/MDL/\"\n",
-    "\n",
-    "    (bias_voltage, gain,\n",
-    "     fix_temperature_top,\n",
-    "     fix_temperature_bot) = extract_slow_data(karabo_id, karabo_da_control, ctrl_fname, ctrl_path,\n",
-    "                                              mdl_ctrl_path, bias_voltage, gain,\n",
-    "                                              fix_temperature_top, fix_temperature_bot)"
+    "run_dc = RunDirectory(Path(in_folder) / f\"r{run:04d}\")\n",
+    "ctrl_data = pnccdlib.PnccdCtrl(run_dc, karabo_id)\n",
+    "\n",
+    "proposal = run_dc.run_metadata()[\"proposalNumber\"]\n",
+    "print(f'Proposal: {proposal}, Run: {run}')\n",
+    "\n",
+    "# extract control data\n",
+    "if not set_parameters_manually:\n",
+    "    step_timer.start()\n",
+    "\n",
+    "    if bias_voltage == 0.:\n",
+    "        bias_voltage = ctrl_data.get_bias_voltage()\n",
+    "    if gain == -1:\n",
+    "        gain = ctrl_data.get_gain()\n",
+    "    if fix_temperature_top == 0:\n",
+    "        fix_temperature_top = ctrl_data.get_fix_temperature_top()\n",
+    "    if fix_temperature_bot == 0:\n",
+    "        fix_temperature_bot = ctrl_data.get_fix_temperature_bot()\n",
+    "\n",
+    "    step_timer.done_step(\"Reading control parameters.\")\n",
+    "\n",
+    "# Printing the Parameters Read from the Data File:\n",
+    "display(Markdown('### Detector Parameters'))\n",
+    "print(f\"Bias voltage is {bias_voltage:0.1f} V.\")\n",
+    "print(f\"Detector gain is set to 1/{int(gain)}.\")\n",
+    "print(f\"Detector integration time is set to {integration_time} ms\")\n",
+    "print(f\"Top pnCCD sensor is at temperature of {fix_temperature_top:0.2f} K\")\n",
+    "print(f\"Bottom pnCCD sensor is at temperature of {fix_temperature_bot:0.2f} K\")"
    ]
   },
   {
@@ -246,14 +219,15 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Printing the Parameters Read from the Data File:\n",
-    "\n",
-    "display(Markdown('### Detector Parameters'))\n",
-    "print(f\"Bias voltage is {bias_voltage:0.1f} V.\")\n",
-    "print(f\"Detector gain is set to 1/{int(gain)}.\")\n",
-    "print(f\"Detector integration time is set to {integration_time} ms\")\n",
-    "print(f\"Top pnCCD sensor is at temperature of {fix_temperature_top:0.2f} K\")\n",
-    "print(f\"Bottom pnCCD sensor is at temperature of {fix_temperature_bot:0.2f} K\")"
+    "seq_files = []\n",
+    "for f in run_dc.select(instrument_src).files:\n",
+    "    fpath = Path(f.filename)\n",
+    "    if fpath.match(f\"*{karabo_da}*.h5\"):\n",
+    "        seq_files.append(fpath)\n",
+    "if sequences != [-1]:\n",
+    "    seq_files = sorted([f for f in seq_files if any(f.match(f\"*-S{s:05d}.h5\") for s in sequences)])\n",
+    "print(f\"Processing a total of {len(seq_files)} sequence files:\")\n",
+    "print(*seq_files, sep='\\n')"
    ]
   },
   {
@@ -262,7 +236,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "gain_k = [k for k, v in VALID_GAINS.items() if v == gain][0]\n",
+    "gain_k = [k for k, v in pnccdlib.VALID_GAINS.items() if v == gain][0]\n",
     "if gain_k == 'a':\n",
     "    split_evt_mip_threshold = 1000. # MIP threshold in ADU for event classification (10 times average noise)\n",
     "\n",
@@ -343,45 +317,9 @@
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": [
-    "display(Markdown('### List of Files to be Processed'))\n",
-    "print(\"Reading data from the following files:\")\n",
-    "for i in range(len(file_list)):\n",
-    "    print(file_list[i])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:23.913269Z",
-     "start_time": "2018-12-06T15:54:23.868910Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "# Sensor size and block size definitions (important for common mode and other corrections):\n",
-    "\n",
-    "sensorSize = [pixels_x, pixels_y]\n",
-    "blockSize = [sensorSize[0]//2, sensorSize[1]//2] # sensor area will be analysed according to blockSize\n",
-    "xcal.defaultBlockSize = blockSize # for xcal.HistogramCalculators \n",
-    "memoryCells = 1 # pnCCD has 1 memory cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:23.913269Z",
-     "start_time": "2018-12-06T15:54:23.868910Z"
-    }
-   },
-   "outputs": [],
    "source": [
     "# Output Folder Creation:\n",
-    "os.makedirs(out_folder, exist_ok=True)"
+    "os.makedirs(out_folder, exist_ok=True if overwrite else False)"
    ]
   },
   {
@@ -443,16 +381,21 @@
     "    return constants"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Retrieving calibration constants"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "display(Markdown('### Dark Data Retrieval'))\n",
-    "\n",
+    "display(Markdown('### Dark constants retrieval'))\n",
+    "step_timer.start()\n",
     "db_parms = cal_db_interface, cal_db_timeout\n",
     "\n",
     "constants = get_dark(db_parms, bias_voltage, gain, integration_time,\n",
@@ -474,7 +417,8 @@
     "                       lut_label='Bad Pixel Value (ADU)', \n",
     "                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
-    "                       title = 'Dark Bad Pixels Map')"
+    "                       title = 'Dark Bad Pixels Map')\n",
+    "step_timer.done_step(\"Dark constants retrieval\")"
    ]
   },
   {
@@ -484,7 +428,8 @@
    "outputs": [],
    "source": [
     "if corr_bools.get('relgain'):\n",
-    "    display(Markdown('We will now retrieve the relative gain map from the calibration database.'))\n",
+    "    step_timer.start()\n",
+    "    display(Markdown('### Relative gain constant retrieval'))\n",
     "    metadata = ConstantMetaData()\n",
     "    relgain = Constants.CCD(DetectorTypes.pnCCD).RelativeGain()\n",
     "    metadata.calibration_constant = relgain\n",
@@ -514,7 +459,8 @@
     "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                           panel_top_low_lim = 0.5, panel_top_high_lim = 1.5, panel_side_low_lim = 0.5, \n",
     "                           panel_side_high_lim = 1.5, \n",
-    "                           title = f'Relative Gain Map for pnCCD (Gain = 1/{int(gain)})')"
+    "                           title = f'Relative Gain Map for pnCCD (Gain = 1/{int(gain)})')\n",
+    "    step_timer.done_step(\"Relative gain constant retrieval\")"
    ]
   },
   {
@@ -524,7 +470,6 @@
    "outputs": [],
    "source": [
     "#************************ Calculators ************************#\n",
-    "\n",
     "if corr_bools.get('common_mode'):\n",
     "    # Common Mode Correction Calculator:\n",
     "    cmCorrection = xcal.CommonModeCorrection([pixels_x, pixels_y],\n",
@@ -532,7 +477,6 @@
     "                                             commonModeAxis,\n",
     "                                             parallel=False, dType=np.float32, stride=1,\n",
     "                                             noiseMap=constants[\"Noise\"].astype(np.float32), minFrac=0.25)\n",
-    "    cmCorrection.debug()\n",
     "\n",
     "if corr_bools.get('pattern_class'):\n",
     "    # Pattern Classifier Calculator:\n",
@@ -543,11 +487,10 @@
     "                                                 split_evt_secondary_threshold,\n",
     "                                                 split_evt_mip_threshold,\n",
     "                                                 tagFirstSingles=3, # track along y-axis, left to right (see \n",
-    "                                                 nCells=memoryCells, # split_event.py file in pydetlib/lib/src/\n",
-    "                                                 cores=cpuCores,     # XFELDetAna/algorithms)\n",
-    "                                                 allowElongated=False,\n",
+    "                                                 nCells=1, # split_event.py file in pydetlib/lib/src/\n",
+    "                                                 allowElongated=False, # XFELDetAna/algorithms)\n",
     "                                                 blockSize=[pixels_x, pixels_y//2],\n",
-    "                                                 runParallel=True)\n",
+    "                                                 parallel=False)\n",
     "\n",
     "    # Right Hemisphere:\n",
     "    patternClassifierRH = xcal.PatternClassifier([pixels_x, pixels_y//2],\n",
@@ -556,31 +499,25 @@
     "                                                 split_evt_secondary_threshold,\n",
     "                                                 split_evt_mip_threshold,\n",
     "                                                 tagFirstSingles=4, # track along y-axis, right to left\n",
-    "                                                 nCells=memoryCells,\n",
-    "                                                 cores=cpuCores,\n",
+    "                                                 nCells=1,\n",
     "                                                 allowElongated=False,\n",
     "                                                 blockSize=[pixels_x, pixels_y//2],\n",
-    "                                                 runParallel=True)\n",
+    "                                                 parallel=False)\n",
     "\n",
-    "    patternClassifierLH._imagesPerChunk = 500\n",
-    "    patternClassifierRH._imagesPerChunk = 500\n",
-    "    patternClassifierLH.debug()\n",
-    "    patternClassifierRH.debug()\n",
+    "    patternClassifierLH._imagesPerChunk = 1\n",
+    "    patternClassifierRH._imagesPerChunk = 1\n",
     "\n",
+    "    patternClassifierLH._noisemap = constants[\"Noise\"][:, :pixels_x//2]\n",
+    "    patternClassifierRH._noisemap = constants[\"Noise\"][:, pixels_x//2:]\n",
     "    # Setting bad pixels:\n",
-    "    patternClassifierLH.setBadPixelMask(constants[\"BadPixelsDark\"][:, :pixels_y//2] != 0)\n",
-    "    patternClassifierRH.setBadPixelMask(constants[\"BadPixelsDark\"][:, pixels_y//2:] != 0)"
+    "    patternClassifierLH.setBadPixelMask(constants[\"BadPixelsDark\"][:, :pixels_x//2] != 0)\n",
+    "    patternClassifierRH.setBadPixelMask(constants[\"BadPixelsDark\"][:, pixels_x//2:] != 0)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:28.771629Z",
-     "start_time": "2018-12-06T15:54:28.346051Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "#***************** Histogram Calculators ******************#\n",
@@ -588,91 +525,163 @@
     "histCalRaw = xcal.HistogramCalculator(sensorSize, \n",
     "                                      bins=bins, \n",
     "                                      range=bin_range,\n",
-    "                                      nCells=memoryCells, \n",
-    "                                      cores=cpuCores,\n",
-    "                                      blockSize=blockSize) \n",
-    "histCalRaw.debug()\n",
+    "                                      nCells=1, \n",
+    "                                      parallel=False,\n",
+    "                                      blockSize=blockSize)\n",
     "# Will contain offset corrected data:\n",
     "histCalOffsetCor = xcal.HistogramCalculator(sensorSize, \n",
     "                                            bins=bins, \n",
     "                                            range=bin_range,\n",
-    "                                            nCells=memoryCells, \n",
-    "                                            cores=cpuCores,\n",
+    "                                            nCells=1, \n",
+    "                                            parallel=False,\n",
     "                                            blockSize=blockSize)\n",
-    "histCalOffsetCor.debug()\n",
     "if corr_bools.get('common_mode'):\n",
     "    # Will contain common mode corrected data:\n",
-    "    histCalCommonModeCor = xcal.HistogramCalculator(sensorSize, \n",
-    "                                                    bins=bins, \n",
+    "    histCalCommonModeCor = xcal.HistogramCalculator(sensorSize,\n",
+    "                                                    bins=bins,\n",
     "                                                    range=bin_range,\n",
-    "                                                    nCells=memoryCells, \n",
-    "                                                    cores=cpuCores,\n",
+    "                                                    nCells=1, \n",
+    "                                                    parallel=False,\n",
     "                                                    blockSize=blockSize)\n",
-    "    histCalCommonModeCor.debug()\n",
-    "    \n",
     "if corr_bools.get('pattern_class'):\n",
     "    # Will contain split events pattern data:\n",
-    "    histCalPcorr = xcal.HistogramCalculator(sensorSize, \n",
-    "                                            bins=bins, \n",
+    "    histCalPcorr = xcal.HistogramCalculator(sensorSize,\n",
+    "                                            bins=bins,\n",
     "                                            range=bin_range,\n",
-    "                                            nCells=memoryCells, \n",
-    "                                            cores=cpuCores,\n",
+    "                                            nCells=1, \n",
+    "                                            parallel=False,\n",
     "                                            blockSize=blockSize)\n",
-    "    histCalPcorr.debug()\n",
     "    # Will contain singles events data:\n",
-    "    histCalPcorrS = xcal.HistogramCalculator(sensorSize, \n",
-    "                                             bins=bins, \n",
+    "    histCalPcorrS = xcal.HistogramCalculator(sensorSize,\n",
+    "                                             bins=bins,\n",
     "                                             range=bin_range,\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
+    "                                             nCells=1, \n",
+    "                                             parallel=False,\n",
     "                                             blockSize=blockSize)\n",
-    "    histCalPcorrS.debug()\n",
     "if corr_bools.get('relgain'):\n",
     "    # Will contain gain corrected data:\n",
-    "    histCalGainCor = xcal.HistogramCalculator(sensorSize, \n",
-    "                                              bins=bins, \n",
+    "    histCalGainCor = xcal.HistogramCalculator(sensorSize,\n",
+    "                                              bins=bins,\n",
     "                                              range=bin_range,\n",
-    "                                              nCells=memoryCells, \n",
-    "                                              cores=cpuCores,\n",
-    "                                              blockSize=blockSize)\n",
-    "    histCalGainCor.debug()"
+    "                                              nCells=1, \n",
+    "                                              parallel=False,\n",
+    "                                              blockSize=blockSize)"
    ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {
+    "tags": []
+   },
+   "source": [
+    "## Applying corrections to the raw data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {},
+   "outputs": [],
    "source": [
-    "Applying offset and common mode corrections to the raw data"
+    "def offset_correction(wid, index, d):\n",
+    "    \"\"\"offset correction.\n",
+    "    Equating bad pixels' values to np.nan,\n",
+    "    so that the pattern classifier ignores them:\n",
+    "    \"\"\"\n",
+    "    d = d.copy()\n",
+    "\n",
+    "    # TODO: To clear this up. Is it on purpose to save corrected data with nans?\n",
+    "    d[bpix != 0] = np.nan\n",
+    "    d -= offset  # offset correction\n",
+    "\n",
+    "    # TODO: to clear this up. why save the badpixels map in the corrected data?\n",
+    "    bpix_data[index, ...] = bpix\n",
+    "    data[index, ...] = d\n",
+    "\n",
+    "def common_mode(wid, index, d):\n",
+    "    \"\"\"common-mode correction.\n",
+    "    Discarding events caused by saturated pixels:\n",
+    "    \"\"\"\n",
+    "    d = np.squeeze(cmCorrection.correct(d, cellTable=np.zeros(pixels_y, np.int32)))\n",
+    "    # we equate these values to np.nan so that the pattern classifier ignores them:\n",
+    "    d[d >= saturated_threshold] = np.nan\n",
+    "    data[index, ...] = d\n",
+    "\n",
+    "\n",
+    "def gain_correction(wid, index, d):\n",
+    "    \"\"\"relative gain correction.\"\"\"\n",
+    "    d /= relativegain  \n",
+    "    data[index, ...] = d\n",
+    "\n",
+    "\n",
+    "def pattern_classification_correction(wid, index, d):\n",
+    "    \"\"\"pattern classification correction.\n",
+    "    data set to save split event corrected images\n",
+    "    \n",
+    "    The calculation of the cluster map:]\n",
+    "    Dividing the data into left and right hemispheres:\n",
+    "    \"\"\"\n",
+    "\n",
+    "    # pattern classification on corrected data\n",
+    "    dataLH, patternsLH = patternClassifierLH.classify(d[:, :pixels_x//2])\n",
+    "    dataRH, patternsRH = patternClassifierRH.classify(d[:, pixels_x//2:])\n",
+    "\n",
+    "    d[:, :pixels_x//2] = np.squeeze(dataLH)\n",
+    "    d[:, pixels_x//2:] = np.squeeze(dataRH)\n",
+    "\n",
+    "    patterns = np.zeros(d.shape, patternsLH.dtype)\n",
+    "    patterns[:, :pixels_x//2] = np.squeeze(patternsLH)\n",
+    "    patterns[:, pixels_x//2:] = np.squeeze(patternsRH)\n",
+    "    d[d < split_evt_primary_threshold*noise] = 0\n",
+    "    data[index, ...] = d\n",
+    "    ptrn_data[index, ...] = patterns\n",
+    "    d[patterns != 100] = np.nan  # Discard doubles, triples, quadruple, clusters, first singles\n",
+    "    filtered_data[index, ...] = d"
    ]
   },
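+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustration only: a minimal, self-contained sketch of how pasha\n",
+    "# dispatches the callbacks above. Each function is invoked as\n",
+    "# f(worker_id, index, value), with `value` iterating over the first\n",
+    "# (train) axis of the mapped array. All demo_* names are assumptions.\n",
+    "demo_in = np.arange(4 * 2 * 2, dtype=np.float32).reshape(4, 2, 2)\n",
+    "demo_ctx = psh.context.ThreadContext(num_workers=2)\n",
+    "demo_out = demo_ctx.alloc(shape=demo_in.shape, dtype=np.float32)\n",
+    "\n",
+    "def demo_callback(wid, index, frame):\n",
+    "    demo_out[index] = frame * 2  # stand-in per-frame operation\n",
+    "\n",
+    "demo_ctx.map(demo_callback, demo_in)\n",
+    "assert np.allclose(demo_out, demo_in * 2)"
+   ]
+  },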
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:08:53.551111Z",
-     "start_time": "2018-12-06T16:08:53.531064Z"
-    }
-   },
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 10 is a number chosen after testing 1 ... 71 parallel threads for a node with 72 cpus.\n",
+    "parallel_num_threads = 10\n",
+    "context = psh.context.ThreadContext(num_workers=parallel_num_threads)\n",
+    "\n",
+    "data_path = \"INSTRUMENT/\"+instrument_src+\"/data/\"\n",
+    "\n",
+    "offset = np.squeeze(constants[\"Offset\"])\n",
+    "noise = np.squeeze(constants[\"Noise\"])\n",
+    "bpix = np.squeeze(constants[\"BadPixelsDark\"])\n",
+    "relativegain = constants.get(\"RelativeGain\")"
+   ]
+  },
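+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A minimal sketch (illustration only, not used below) of how a worker\n",
+    "# count such as the one above can be chosen empirically: time context.map\n",
+    "# over a small dummy workload for several candidate counts and keep the\n",
+    "# fastest. The candidate counts, the frame count and the stand-in\n",
+    "# per-frame work are assumptions.\n",
+    "import time\n",
+    "\n",
+    "\n",
+    "def best_worker_count(n_frames=16, candidates=(1, 10, 36, 71)):\n",
+    "    dummy = np.zeros((n_frames, pixels_x, pixels_y), dtype=np.float32)\n",
+    "    timings = {}\n",
+    "    for n in candidates:\n",
+    "        ctx = psh.context.ThreadContext(num_workers=n)\n",
+    "        out = ctx.alloc(shape=dummy.shape, dtype=np.float32)\n",
+    "\n",
+    "        def work(wid, index, frame):\n",
+    "            out[index] = frame - offset  # stand-in per-frame correction\n",
+    "\n",
+    "        t0 = time.perf_counter()\n",
+    "        ctx.map(work, dummy)\n",
+    "        timings[n] = time.perf_counter() - t0\n",
+    "    return min(timings, key=timings.get)"
+   ]
+  },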
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
    "outputs": [],
    "source": [
-    "def copy_and_sanitize_non_cal_data(infile: str, outfile: str, h5base: str):\n",
-    "    '''This function reads the .h5 data and writes the corrected .h5 data.'''\n",
-    "    if h5base.startswith(\"/\"):\n",
-    "        h5base = h5base[1:]\n",
-    "    dont_copy = ['image']\n",
-    "    dont_copy = [h5base+\"/{}\".format(do) for do in dont_copy]\n",
-    "\n",
-    "    def visitor(k, item):\n",
-    "        if k not in dont_copy:\n",
-    "            if isinstance(item, h5py.Group):\n",
-    "                outfile.create_group(k)\n",
-    "            elif isinstance(item, h5py.Dataset):\n",
-    "                group = str(k).split(\"/\")\n",
-    "                group = \"/\".join(group[:-1])\n",
-    "                infile.copy(k, outfile[group])\n",
-    "                \n",
-    "    infile.visititems(visitor)"
+    "def write_datasets(corr_arrays, ofile):\n",
+    "    \"\"\"\n",
+    "    Creating datasets first then adding data.\n",
+    "    To have metadata together available at the start of the file,\n",
+    "    so it's quick to see what the file contains\n",
+    "    \"\"\"\n",
+    "    comp_fields = [\"gain\", \"patterns\", \"pixels_classified\"]\n",
+    "    img_grp = ofile[data_path]\n",
+    "\n",
+    "    for field, data in corr_arrays.items():\n",
+    "        kw = dict(chunks=(chunk_size_idim, pixels_x, pixels_y))\n",
+    "        if field in comp_fields:\n",
+    "            kw[\"compression\"] = \"gzip\"\n",
+    "\n",
+    "        img_grp.create_dataset(\n",
+    "            field, shape=data.shape, dtype=data.dtype, **kw)\n",
+    "\n",
+    "    for field, data in corr_arrays.items():\n",
+    "        img_grp[field][:] = data"
    ]
   },
   {
@@ -681,172 +690,144 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Data corrections and event classifications happen here. Also, the corrected data are written to datasets:\n",
+    "# Data corrections and event classifications happen here.\n",
+    "# Also, the corrected data are written to datasets:\n",
+    "for seq_n, seq_f in enumerate(seq_files):\n",
+    "    f_dc = H5File(seq_f)\n",
+    "    out_file = f\"{out_folder}/{seq_f.name}\".replace(\"RAW\", \"CORR\")\n",
     "\n",
-    "# Initialize 5 numpy array of zeros with the shape of (1024, 1024, 0)\n",
-    "uncor, off_cor, g_cor, cm_cor, final_cor = np.zeros((5, 1024, 1024, 0), dtype=np.float32)\n",
+    "    step_timer.start()\n",
     "\n",
-    "offsetMap = np.squeeze(constants[\"Offset\"])\n",
-    "noiseMap = np.squeeze(constants[\"Noise\"])\n",
-    "badPixelMap = np.squeeze(constants[\"BadPixelsDark\"])\n",
+    "    dshape = f_dc[instrument_src, \"data.image\"].shape\n",
+    "    n_trains = dshape[0]\n",
     "\n",
-    "if corr_bools.get('relgain'):\n",
-    "    relGain = constants[\"RelativeGain\"]\n",
+    "    # If you want to analyze only a certain number of frames\n",
+    "    # instead of all available good frames.\n",
+    "    if limit_images > 0:\n",
+    "        n_trains = min(n_trains, limit_images)\n",
+    "    data_shape = (n_trains, dshape[1], dshape[2])\n",
     "\n",
-    "for k, f in enumerate(file_list):\n",
-    "    with h5py.File(f, 'r') as infile:\n",
-    "        out_fileb = \"{}/{}\".format(out_folder, f.split(\"/\")[-1])\n",
-    "        out_file = out_fileb.replace(\"RAW\", \"CORR\")\n",
+    "    print(f\"Correcting file: {seq_f} of shape {data_shape}.\")\n",
     "\n",
-    "        data = None\n",
-    "        noise = None\n",
-    "        \n",
-    "        try:\n",
-    "            with h5py.File(out_file, \"w\") as ofile:\n",
-    "                copy_and_sanitize_non_cal_data(infile, ofile, h5path)\n",
-    "                data = infile[h5path+\"/image\"][()]\n",
-    "                # Getting rid of empty frames:\n",
-    "                nzidx = np.count_nonzero(data, axis=(1, 2))\n",
-    "                data = data[nzidx != 0, ...]\n",
-    "                \n",
-    "                # If you want to analyze only a certain number of frames instead of all available good frames: \n",
-    "                if limit_images > 0:\n",
-    "                    data = data[:limit_images,...]\n",
-    "                    \n",
-    "                # used for array shapes in the corrected data sets that we create and save in this loop:\n",
-    "                oshape = data.shape\n",
-    "                \n",
-    "                data = np.moveaxis(data, 0, 2)\n",
-    "                \n",
-    "                # data set to save offset corrected images:\n",
-    "                ddset = ofile.create_dataset(h5path+\"/pixels\",\n",
-    "                                             oshape,\n",
-    "                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
-    "                                             dtype=np.float32)\n",
-    "                # data set to create bad pixels:\n",
-    "                ddsetm = ofile.create_dataset(h5path+\"/mask\",\n",
-    "                                             oshape,\n",
-    "                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
-    "                                             dtype=np.uint32, compression=\"gzip\")\n",
-    "                \n",
-    "                data = data.astype(np.float32) \n",
-    "                \n",
-    "                # Creating maps for correction usage:\n",
-    "                offset = np.repeat(offsetMap[...,None], data.shape[2], axis=2)\n",
-    "                noise = np.repeat(noiseMap[...,None], data.shape[2], axis=2)\n",
-    "                bpix = np.repeat(badPixelMap[...,None], data.shape[2], axis=2)\n",
-    "                if corr_bools.get('relgain'):\n",
-    "                    rg = np.repeat(relGain[:,:,None], data.shape[2], axis=2) # rg = relative gain\n",
-    "                \n",
-    "                # non-corrected images for first sequence only:\n",
-    "                if k == 0:\n",
-    "                    uncor = np.append(uncor, data, axis=2)\n",
-    "                        \n",
-    "                histCalRaw.fill(data) # filling histogram with raw uncorrected data\n",
-    "                \n",
-    "                # equating bad pixels' values to np.nan so that the pattern classifier ignores them:\n",
-    "                data[bpix != 0] = np.nan\n",
-    "            \n",
-    "                data -= offset # offset correction  \n",
-    "                \n",
-    "                # Offset corrected images for first sequence only:\n",
-    "                if k == 0:\n",
-    "                    off_cor = np.append(off_cor, data, axis=2)\n",
-    "                histCalOffsetCor.fill(data) # filling histogram with offset corrected data\n",
-    "\n",
-    "                ddset[...] = np.moveaxis(data, 2, 0)\n",
-    "                ddsetm[...] = np.moveaxis(bpix, 2, 0)\n",
-    "                ofile.flush()\n",
-    "\n",
-    "                # cm: common mode\n",
-    "                if corr_bools.get('common_mode'):\n",
-    "                    \n",
-    "                    # data set to save common mode corrected images:\n",
-    "                    ddsetcm = ofile.create_dataset(h5path+\"/pixels_cm\",\n",
-    "                                                   oshape,\n",
-    "                                                   chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
-    "                                                   dtype=np.float32)\n",
-    "                    \n",
-    "                    # common mode correction:\n",
-    "                    data = cmCorrection.correct(data.astype(np.float32),  # common mode correction\n",
-    "                                                cellTable=np.zeros(data.shape[2], np.int32)) \n",
-    "                    \n",
-    "                    # discarding events caused by saturated pixels:\n",
-    "                    # we equate these values to np.nan so that the pattern classifier ignores them:\n",
-    "                    data[data >= saturated_threshold] = np.nan \n",
-    "                    histCalCommonModeCor.fill(data) # filling histogram with common mode corrected data\n",
-    "                    # common mode corrected images for first sequence only:\n",
-    "                    if k == 0:\n",
-    "                        cm_cor = np.append(cm_cor, data, axis=2)\n",
-    "                    \n",
-    "                    ddsetcm[...] = np.moveaxis(data, 2, 0)\n",
-    "                    \n",
-    "                if corr_bools.get('relgain'):\n",
-    "                    # data set to save gain corrected images:\n",
-    "                    ddsetg = ofile.create_dataset(h5path+\"/gain\",\n",
-    "                                                  oshape,\n",
-    "                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
-    "                                                  dtype=np.float32, compression=\"gzip\")\n",
-    "                \n",
-    "                    data /= rg  # relative gain correction \n",
-    "                    histCalGainCor.fill(data) # filling histogram with gain corrected data\n",
-    "                    # gain corrected images for first sequence only:\n",
-    "                    if k == 0:\n",
-    "                        g_cor = np.append(g_cor, data, axis=2)\n",
-    "                    ddsetg[...] = np.moveaxis(rg, 2, 0).astype(np.float32)\n",
-    "\n",
-    "                if corr_bools.get('pattern_class'):\n",
-    "                    # data set to save split event corrected images:\n",
-    "                    # c: classifications, p: even patterns\n",
-    "                    ddsetc = ofile.create_dataset(h5path+\"/pixels_classified\",\n",
-    "                                                  oshape,\n",
-    "                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
-    "                                                  dtype=np.float32, compression=\"gzip\")\n",
-    "                    # data set to save different valid patterns:\n",
-    "                    ddsetp = ofile.create_dataset(h5path+\"/patterns\",\n",
-    "                                                 oshape,\n",
-    "                                                 chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
-    "                                                 dtype=np.int32, compression=\"gzip\")\n",
-    "                    \n",
-    "                    # The calculation of the cluster map:\n",
-    "                    patternClassifierLH._noisemap = noise[:, :pixels_x//2,  :]\n",
-    "                    patternClassifierRH._noisemap = noise[:, pixels_x//2:,  :]\n",
-    "\n",
-    "                    # Dividing the data into left and right hemispheres:\n",
-    "                    dataLH = data[:, :pixels_x//2, :]\n",
-    "                    dataRH = data[:, pixels_x//2:, :]\n",
-    "                    \n",
-    "                    # pattern classification on corrected data\n",
-    "                    dataLH, patternsLH = patternClassifierLH.classify(dataLH) \n",
-    "                    dataRH, patternsRH = patternClassifierRH.classify(dataRH)\n",
-    "\n",
-    "                    data[:, :pixels_x//2, :] = dataLH\n",
-    "                    data[:, pixels_x//2:, :] = dataRH\n",
-    "\n",
-    "                    patterns = np.zeros(data.shape, patternsLH.dtype)\n",
-    "                    patterns[:, :pixels_x//2, :] = patternsLH\n",
-    "                    patterns[:, pixels_x//2:, :] = patternsRH\n",
-    "\n",
-    "                    data[data < split_evt_primary_threshold*noise] = 0\n",
-    "                    ddsetc[...] = np.moveaxis(data, 2, 0)\n",
-    "                    ddsetp[...] = np.moveaxis(patterns, 2, 0)\n",
-    "\n",
-    "                    histCalPcorr.fill(data) # filling histogram with split events corrected data\n",
-    "                    data[patterns != 100] = np.nan # Discard doubles, triples, quadruple, clusters, first singles\n",
-    "                    histCalPcorrS.fill(data) # filling histogram with singles events data\n",
-    "                     \n",
-    "                    # split event corrected images for first sequence only (also these events are only \n",
-    "                    # singles events):\n",
-    "                    if k == 0:\n",
-    "                        final_cor = np.append(final_cor, data, axis=2)\n",
-    "                    \n",
-    "        except Exception as e:\n",
-    "            print(f\"Couldn't calibrate data in {f}: {e}\\n\")\n",
+    "    data_dc = f_dc.select(instrument_src, \"data.image\", require_all=True).select_trains(np.s_[:n_trains])  # noqa\n",
+    "\n",
+    "    raw_data = data_dc[instrument_src, \"data.image\"].ndarray().astype(np.float32)\n",
+    "\n",
+    "    if seq_n == 0:\n",
+    "        raw_plt = raw_data.copy()  # plot first sequence only\n",
+    "\n",
+    "    step_timer.start()\n",
     "\n",
+    "    # Allocating shared arrays for data arrays for each correction stage.\n",
+    "    data = context.alloc(shape=data_shape, dtype=np.float32)\n",
+    "    bpix_data = context.alloc(shape=data_shape, dtype=np.uint32)\n",
+    "    histCalRaw.fill(raw_data)  # filling histogram with raw uncorrected data\n",
+    "    \n",
+    "    # Applying offset correction\n",
+    "    context.map(offset_correction, raw_data)\n",
+    "    histCalOffsetCor.fill(data)  # filling histogram with offset corrected data\n",
+    "\n",
+    "    if seq_n == 0:\n",
+    "        off_data = data.copy()  # plot first sequence only\n",
+    "\n",
+    "\n",
+    "    corr_arrays = {\n",
+    "        \"pixels\": data.copy(),\n",
+    "        \"mask\": bpix_data, \n",
+    "    }\n",
+    "    step_timer.done_step(f'offset correction.')\n",
+    "\n",
+    "    if corr_bools.get('common_mode'):\n",
+    "        step_timer.start()\n",
+    "\n",
+    "        # Applying common mode correction\n",
+    "        context.map(common_mode, data)\n",
+    "        if seq_n == 0:\n",
+    "            cm_data = data.copy()  # plot first sequence only\n",
+    "        corr_arrays[\"pixels_cm\"] = data.copy()\n",
+    "        histCalCommonModeCor.fill(data)  # filling histogram with common mode corrected data\n",
+    "\n",
+    "        step_timer.done_step(f'common-mode correction.')\n",
+    "\n",
+    "    if corr_bools.get('relgain'):\n",
+    "\n",
+    "        step_timer.start()\n",
+    "\n",
+    "        # Applying gain correction\n",
+    "        context.map(gain_correction, data)\n",
+    "        if seq_n == 0:\n",
+    "            rg_data = data.copy()  # plot first sequence only\n",
+    "        # TODO: Why storing a repeated constant for each image in corrected files.\n",
+    "        corr_arrays[\"gain\"] = np.repeat(relativegain[np.newaxis, ...], n_trains, axis=0).astype(np.float32)  # noqa\n",
+    "        histCalGainCor.fill(data)  # filling histogram with gain corrected data\n",
+    "        step_timer.done_step(f'gain correction.')\n",
+    "\n",
+    "    if corr_bools.get('pattern_class'):\n",
+    "        step_timer.start()\n",
+    "\n",
+    "        ptrn_data = context.alloc(shape=data_shape, dtype=np.int32)\n",
+    "        filtered_data = context.alloc(shape=data_shape, dtype=np.int32)\n",
+    "        # Applying pattern classification correction\n",
+    "        # Even thougth data is indeed of dtype np.float32,\n",
+    "        # not specifiying this again screw with the data quality.\n",
+    "        context.map(pattern_classification_correction, data.astype(np.float32))\n",
+    "\n",
+    "        if seq_n == 0:\n",
+    "            cls_data = data.copy()  # plot first sequence only\n",
+    "        # split event corrected images plotted for first sequence only\n",
+    "        # (also these events are only singles events):\n",
+    "        corr_arrays[\"pixels_classified\"] = data.copy()\n",
+    "        corr_arrays[\"patterns\"] = ptrn_data\n",
+    "\n",
+    "        histCalPcorr.fill(data)  # filling histogram with split events corrected data\n",
+    "        # filling histogram with corr data after discarding doubles, triples, quadruple, clusters, and first singles\n",
+    "        histCalPcorrS.fill(filtered_data)\n",
+    "        step_timer.done_step(f'pattern classification correction.')\n",
+    "\n",
+    "    step_timer.start()\n",
+    "\n",
+    "    # Storing corrected data sources.\n",
+    "    with h5py.File(out_file, 'w') as ofile:\n",
+    "        # Copy RAW non-calibrated sources.\n",
+    "        with h5py.File(seq_f, 'r') as sfile:\n",
+    "            h5_copy_except.h5_copy_except_paths(\n",
+    "                sfile, ofile,\n",
+    "                [\"INSTRUMENT/\"+instrument_src+\"/data/image\"],\n",
+    "            )\n",
+    "        # TODO: to clear this up: why save corrected data in data/pixels rather than data/image.\n",
+    "        write_datasets(corr_arrays, ofile)\n",
+    "\n",
+    "    step_timer.done_step(f'Storing data.')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "print(\"In addition to offset correction, the following corrections were performed:\")\n",
     "for k, v in corr_bools.items():\n",
     "    if v:\n",
-    "        print(k)"
+    "        print(\"  -\", k.upper())\n",
+    "\n",
+    "print(f\"Total processing time {step_timer.timespan():.01f} s\")\n",
+    "step_timer.print_summary()"
    ]
   },
   {
@@ -862,7 +843,7 @@
     "# if you use histCalRaw.get(cumulatative = True) and so on, the cumulatative = True turns the counts array such as \n",
     "# RawHistVals and so on into a 1D array instead of keeping the original shape:\n",
     "\n",
-    "RawHistVals, _, RawHistMids, _ = histCalRaw.get() \n",
+    "RawHistVals, _, RawHistMids, _ = histCalRaw.get()\n",
     "off_cor_HistVals, _, off_cor_HistMids, _ = histCalOffsetCor.get()\n",
     "if corr_bools.get('common_mode'):\n",
     "    cm_cor_HistVals, _, cm_HistMids, _ = histCalCommonModeCor.get()\n",
@@ -880,7 +861,7 @@
    "outputs": [],
    "source": [
     "# Saving intermediate data to disk:\n",
-    "\n",
+    "step_timer.start()\n",
     "np.savez(os.path.join(out_folder, 'Raw_Events.npz'), RawHistMids, RawHistVals)\n",
     "np.savez(os.path.join(out_folder, 'Offset_Corrected_Events.npz'), off_cor_HistMids, off_cor_HistVals)\n",
     "if corr_bools.get('common_mode'):\n",
@@ -890,7 +871,7 @@
     "if corr_bools.get('pattern_class'):\n",
     "    np.savez(os.path.join(out_folder, 'Split_Events_Corrected_Events.npz'), split_HistMids, split_HistVals)\n",
     "    np.savez(os.path.join(out_folder, 'Singles_Events.npz'), singles_HistMids, singles_HistVals)\n",
-    "\n",
+    "step_timer.done_step(f'Saving intermediate data to disk.')\n",
     "print(\"Various spectra are saved to disk in the form of histograms. Please check {}\".format(out_folder))"
    ]
   },
@@ -901,6 +882,7 @@
    "outputs": [],
    "source": [
     "display(Markdown('### Raw vs. Corrected Spectra'))\n",
+    "step_timer.start()\n",
     "\n",
     "figure = [{'x': RawHistMids,\n",
     "      'y': RawHistVals,\n",
@@ -937,8 +919,8 @@
     "                   'errorstyle': 'bars',\n",
     "                   'errorcoarsing': 2,\n",
     "                   'label': 'Gain Corrected'})\n",
-    "    \n",
-    "if corr_bools.get('pattern_class'):    \n",
+    "\n",
+    "if corr_bools.get('pattern_class'):\n",
     "    figure.extend([{'x': split_HistMids,\n",
     "                   'y': split_HistVals,\n",
     "                   'y_err': np.sqrt(split_HistVals[:]),\n",
@@ -957,7 +939,8 @@
     "                  }])\n",
     "fig = xana.simplePlot(figure, aspect=1, x_label='ADU', y_label='Number of Occurrences', figsize='2col',\n",
     "                      y_log=True, x_range=bin_range, title = '1 ADU per bin is used.',\n",
-    "                      legend='top-right-frame-1col')"
+    "                      legend='top-right-frame-1col')\n",
+    "step_timer.done_step('Plotting')"
    ]
   },
   {
@@ -969,7 +952,7 @@
     "# This function plots pattern statistics:\n",
     "\n",
     "def classification_plot(patternStats, hemisphere):\n",
-    "    \n",
+    "\n",
     "    print(\"****************** {} HEMISPHERE ******************\\n\"\n",
     "          .format(hemisphere))\n",
     "    fig = plt.figure(figsize=(15, 15))\n",
@@ -994,7 +977,6 @@
     "\n",
     "    smaps = [\"singlemap\", \"firstsinglemap\", \"clustermap\"]\n",
     "    for i, m in enumerate(smaps):\n",
-    "\n",
     "        ax = fig.add_subplot(4, 4, 2+i)\n",
     "        pmap = ax.imshow(patternStats[m], interpolation=\"nearest\", vmax=2*np.nanmedian(patternStats[m]))\n",
     "        ax.set_title(m)\n",
@@ -1003,7 +985,6 @@
     "    mmaps = [\"doublemap\", \"triplemap\", \"quadmap\"]\n",
     "    k = 0\n",
     "    for i, m in enumerate(mmaps):\n",
-    "\n",
     "        for j in range(4):\n",
     "            ax = fig.add_subplot(4, 4, 2+len(smaps)+k)\n",
     "            pmap = ax.imshow(patternStats[m][j], interpolation=\"nearest\", vmax=2*np.median(patternStats[m][j]))\n",
@@ -1045,6 +1026,7 @@
     "display(Markdown('### Classification Results - Tabulated Statistics'))\n",
     "\n",
     "if corr_bools.get('pattern_class'):\n",
+    "    step_timer.start()\n",
     "    t0 = PrettyTable()\n",
     "    t0.title = \"Total Number of Counts after All Corrections\"\n",
     "    t0.field_names = [\"Hemisphere\", \"Singles\", \"First-Singles\", \"Clusters\"]\n",
@@ -1063,18 +1045,14 @@
     "                patternStatsRH['triples'][2], patternStatsLH['quads'][2], patternStatsRH['quads'][2]])\n",
     "    t1.add_row([3, patternStatsLH['doubles'][3], patternStatsRH['doubles'][3], patternStatsLH['triples'][3], \n",
     "                patternStatsRH['triples'][3], patternStatsLH['quads'][3], patternStatsRH['quads'][3]])\n",
-    "    print(t1)"
+    "    print(t1)\n",
+    "    step_timer.done_step('Classification Results - Tabulated Statistics')"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:10:56.190150Z",
-     "start_time": "2018-12-06T16:10:56.177570Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "if corr_bools.get('pattern_class'):\n",
@@ -1109,17 +1087,13 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:10:56.203219Z",
-     "start_time": "2018-12-06T16:10:56.191509Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "display(Markdown('### Classification Results - Pie Charts'))\n",
     "\n",
     "if corr_bools.get('pattern_class'):\n",
+    "    step_timer.start()\n",
     "    fig = plt.figure(figsize=(12, 7))\n",
     "    ax = fig.add_subplot(1, 2, 1)\n",
     "    labels = ['Singles', 'Doubles', 'Triples', 'Quads']\n",
@@ -1131,7 +1105,8 @@
     "    pie = ax.pie(reloccurRH, labels=labels, autopct='%1.1f%%', shadow=True)\n",
     "    ax.set_title(\"Pattern Occurrence in RH\")\n",
     "    # Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "    a = ax.axis('equal')"
+    "    a = ax.axis('equal')\n",
+    "    step_timer.done_step('Classification Results - Pie Charts')"
    ]
   },
   {
@@ -1144,24 +1119,20 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:10:56.212586Z",
-     "start_time": "2018-12-06T16:10:56.204731Z"
-    },
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "uncor_mean_im = np.nanmean(uncor, axis=2)\n",
-    "offset_mean_im = np.nanmean(off_cor, axis=2)\n",
+    "step_timer.start()\n",
+    "\n",
+    "uncor_mean_im = np.nanmean(raw_data, axis=0)\n",
+    "offset_mean_im = np.nanmean(off_data, axis=0)\n",
     "\n",
     "if corr_bools.get('common_mode'):\n",
-    "    cm_mean_im = np.nanmean(cm_cor, axis=2)\n",
+    "    cm_mean_im = np.nanmean(cm_data, axis=0)\n",
     "if corr_bools.get('relgain'):\n",
-    "    gain_mean_im = np.nanmean(g_cor, axis=2)\n",
+    "    gain_mean_im = np.nanmean(rg_data, axis=0)\n",
     "if corr_bools.get('pattern_class'):\n",
-    "    mean_im_cc = np.nanmean(final_cor, axis=2)\n",
+    "    mean_im_cc = np.nanmean(cls_data, axis=0)\n",
     "\n",
     "fig = xana.heatmapPlot(uncor_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x),  \n",
@@ -1189,7 +1160,8 @@
     "if corr_bools.get('pattern_class'):\n",
     "    fig = xana.heatmapPlot(mean_im_cc, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
     "                           x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0, vmax= 18000,\n",
-    "                           title = 'Image of Single Events Averaged over Frames in the First Sequence')"
+    "                           title = 'Image of Single Events Averaged over Frames in the First Sequence')\n",
+    "step_timer.done_step(\"Plotting\")"
    ]
   },
   {
@@ -1202,44 +1174,40 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:11:08.317130Z",
-     "start_time": "2018-12-06T16:11:05.788655Z"
-    },
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "fig = xana.heatmapPlot(uncor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
+    "step_timer.start()\n",
+    "fig = xana.heatmapPlot(raw_data[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',  \n",
     "                       title = 'Uncorrected Image (First Frame of the First Sequence)')\n",
     "\n",
-    "fig = xana.heatmapPlot(off_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
+    "fig = xana.heatmapPlot(off_data[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1, \n",
     "                       x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
     "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',   \n",
     "                       title = 'Offset Corrected Image (First Frame of the First Sequence)')\n",
     "\n",
     "if corr_bools.get('common_mode'):\n",
-    "        fig = xana.heatmapPlot(cm_cor[:,:,2], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
+    "        fig = xana.heatmapPlot(cm_data[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
     "                               aspect=1, \n",
     "                               x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                               panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                               title = 'Common Mode Corrected Image (First Frame of the First Sequence)')\n",
     "        \n",
     "if corr_bools.get('relgain'):\n",
-    "        fig = xana.heatmapPlot(g_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
+    "        fig = xana.heatmapPlot(rg_data[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', \n",
     "                               aspect=1, \n",
     "                               x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                               panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
     "                               title = 'Gain Corrected Image (First Frame of the First Sequence)')\n",
     "\n",
     "if corr_bools.get('pattern_class'):    \n",
-    "    fig = xana.heatmapPlot(final_cor[:,:,0], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
+    "    fig = xana.heatmapPlot(cls_data[0, :, :], x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,\n",
     "                           x_range=(0, pixels_y), y_range=(0, pixels_x), \n",
     "                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',  \n",
-    "                           title = 'Image of Single Events (First Frame of the First Sequence)')"
+    "                           title = 'Image of Single Events (First Frame of the First Sequence)')\n",
+    "step_timer.done_step(\"Plotting\")"
    ]
   },
   {
@@ -1282,37 +1250,35 @@
     "    doubles = []\n",
     "    triples = []\n",
     "    quads =  []\n",
-    "\n",
-    "    with h5py.File(\"{}/CORR-R{:04d}-PNCCD01-S{:05d}.h5\".format(out_folder, run, sequences[seq_num]), 'r') as infile:\n",
-    "            data = infile[h5path+\"/pixels_classified\"][()].astype(np.float32) # classifications\n",
-    "            patterns = infile[h5path+\"/patterns\"][()].astype(np.float32) # event patterns\n",
-    "            \n",
-    "            # events' patterns indices are as follows: 100 (singles), 101 (first singles), 200 - 203 (doubles),\n",
-    "            # 300 - 303 (triples), and 400 - 403 (quadruples). Note that for the last three types of patterns, \n",
-    "            # there are left, right, up, and down indices.\n",
-    "\n",
-    "            # Separating the events:\n",
-    "            # Singles and First Singles:\n",
-    "            for s in range(100, 102):\n",
-    "                single = copy.copy(data[...])\n",
-    "                single[patterns != s] = np.nan\n",
-    "                singles.append(single)\n",
-    "            \n",
-    "\n",
-    "            for d in range(200, 204):\n",
-    "                double = copy.copy(data[...])\n",
-    "                double[patterns != d] = np.nan\n",
-    "                doubles.append(double)\n",
-    "\n",
-    "            for t in range(300, 304):\n",
-    "                triple = copy.copy(data[...])\n",
-    "                triple[patterns != t] = np.nan\n",
-    "                triples.append(triple)  \n",
-    "\n",
-    "            for q in range(400, 404):\n",
-    "                quad = copy.copy(data[...])\n",
-    "                quad[patterns != q] = np.nan\n",
-    "                quads.append(quad)"
+    "    with H5File(f\"{out_folder}/{seq_files[0].name.replace('RAW', 'CORR')}\") as dc:  # noqa\n",
+    "        data = dc[instrument_src, \"data.pixels_classified\"].ndarray()\n",
+    "        patterns = dc[instrument_src, \"data.patterns\"].ndarray()\n",
+    "    # events' patterns indices are as follows: 100 (singles), 101 (first singles), 200 - 203 (doubles),\n",
+    "    # 300 - 303 (triples), and 400 - 403 (quadruples). Note that for the last three types of patterns, \n",
+    "    # there are left, right, up, and down indices.\n",
+    "\n",
+    "    # Separating the events:\n",
+    "    # Singles and First Singles:\n",
+    "    for s in range(100, 102):\n",
+    "        single = data.copy()\n",
+    "        single[patterns != s] = np.nan\n",
+    "        singles.append(single)\n",
+    "\n",
+    "\n",
+    "    for d in range(200, 204):\n",
+    "        double = data.copy()\n",
+    "        double[patterns != d] = np.nan\n",
+    "        doubles.append(double)\n",
+    "\n",
+    "    for t in range(300, 304):\n",
+    "        triple = data.copy()\n",
+    "        triple[patterns != t] = np.nan\n",
+    "        triples.append(triple)  \n",
+    "\n",
+    "    for q in range(400, 404):\n",
+    "        quad = data.copy()\n",
+    "        quad[patterns != q] = np.nan\n",
+    "        quads.append(quad)"
    ]
   },
   {
@@ -1322,13 +1288,13 @@
    "outputs": [],
    "source": [
     "if corr_bools.get('pattern_class'):\n",
+    "    step_timer.start()\n",
     "    hA = 0\n",
     "    h = 0\n",
     "    for single in singles:\n",
     "        hs, e = np.histogram(single.flatten(), bins=event_bins, range=b_range) # h: histogram counts, e: bin edges\n",
     "        h += hs\n",
     "        hA += hs # hA: counts all events (see below)\n",
-    "\n",
     "   \n",
     "    # bin edges array has one extra element => need to plot from 0 to the one before the last element to have the \n",
     "    # same size as h-array => in what follows, we use e[:-1] (-1 means one before the last element)\n",
@@ -1367,7 +1333,8 @@
     "    ax.step(e[:-1], h, color='purple', label='Events Splitting on Quadruple Pixels')\n",
     "\n",
     "    ax.step(e[:-1], hA, color='grey', label='All Valid Events')\n",
-    "    l = ax.legend()"
+    "    l = ax.legend()\n",
+    "    step_timer.done_step(\"Plotting\")"
    ]
   },
   {
@@ -1375,7 +1342,10 @@
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "print(f\"Total processing time {step_timer.timespan():.01f} s\")\n",
+    "step_timer.print_summary()"
+   ]
   }
  ],
  "metadata": {
@@ -1394,7 +1364,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.7"
+   "version": "3.8.11"
   },
   "latex_envs": {
    "LaTeX_envs_menu_present": true,
@@ -1415,5 +1385,5 @@
   }
  },
  "nbformat": 4,
- "nbformat_minor": 1
+ "nbformat_minor": 4
 }
diff --git a/src/cal_tools/pnccdlib.py b/src/cal_tools/pnccdlib.py
index 785d289c359ffe6d3e62aa15bb6e41de56d4145d..98594427ed23b03224c856294d800235a200436a 100644
--- a/src/cal_tools/pnccdlib.py
+++ b/src/cal_tools/pnccdlib.py
@@ -1,56 +1,40 @@
-# Extracting slow data:
-import os
-import traceback
-from typing import Tuple
-
-import h5py
-
 VALID_GAINS = {
-    "a" : 1.0,
-    "b" : 4.0,
-    "c" : 16.0,
-    "d" : 64.0,
-    "e" : 256.0,
-    "f" : 340.0,
-    "g" : 512.0
+    "a": 1.0,
+    "b": 4.0,
+    "c": 16.0,
+    "d": 64.0,
+    "e": 256.0,
+    "f": 340.0,
+    "g": 512.0
 }
 
 
-def extract_slow_data(karabo_id: str, karabo_da_control: str,
-                      ctrl_fname: str, ctrl_path: str,
-                      mdl_ctrl_path: str,
-                      bias_voltage: float, gain: float,
-                      fix_temperature_top :float,
-                      fix_temperature_bot :float,
-                      ) -> Tuple[float, float, float, float]:
-    """
-    Extract slow data from given control paths.
-    """
-    try:
-        with h5py.File(ctrl_fname, "r") as f:
-            if bias_voltage == 0.:
-                bias_voltage = abs(f[os.path.join(mdl_ctrl_path,
-                                                  "DAQ_MPOD/u0voltage/value")][0])  # noqa
-            if gain == 0.1:
-                gain = f[os.path.join(mdl_ctrl_path,
-                                      "DAQ_GAIN/pNCCDGain/value")][0]
-            if fix_temperature_top == 0.:
-                fix_temperature_top = f[os.path.join(ctrl_path,
-                                                     "inputA/krdg/value")][0]
-            if fix_temperature_bot == 0.:
-                fix_temperature_bot = f[os.path.join(ctrl_path,
-                                                     "inputB/krdg/value")][0]
-    except IOError:
-        print("Error during reading slow data,"
-              " please check the given h5 path for the control parameters")
-        traceback.print_exc(limit=1)
-        print("bias voltage control h5path:",
-              os.path.join(mdl_ctrl_path, "DAQ_MPOD/u0voltage/value"))
-        print("gain control h5path:",
-              os.path.join(mdl_ctrl_path, "DAQ_GAIN/pNCCDGain/value"))
-        print("fix_temperature_top control h5path:",
-              os.path.join(ctrl_path, "inputA/krdg/value"))
-        print("fix_temperature_bot control h5path:",
-              os.path.join(ctrl_path, "inputB/krdg/value"))
+class PnccdCtrl:
+    def __init__(
+        self,
+        run_dc: "extra_data.DataCollection",  # noqa
+        karabo_id: str
+    ):
+        """Extract pnCCD control data from a run's data collection.
+
+        :param run_dc: extra_data DataCollection of the run.
+        :param karabo_id: Karabo identifier of the pnCCD detector.
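+
+        A minimal usage sketch (the run path is a placeholder):
+
+        >>> from extra_data import RunDirectory
+        >>> ctrl = PnccdCtrl(RunDirectory("/path/to/run"), "SQS_NQS_PNCCD1MP")
+        >>> bias_voltage = ctrl.get_bias_voltage()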
+        """
+        self.run_dc = run_dc
+        self.ctrl_src = f"{karabo_id}/CTRL/TCTRL"
+        self.mdl_src_temp = f"{karabo_id}/MDL/{{}}"
+
+    def get_bias_voltage(self):
+        return abs(self.run_dc.get_run_value(
+            self.mdl_src_temp.format("DAQ_MPOD"), "u0voltage.value"))
+
+    def get_gain(self):
+        return self.run_dc.get_run_value(
+            self.mdl_src_temp.format("DAQ_GAIN"), "pNCCDGain.value")
+
+    def get_fix_temperature_top(self):
+        return self.run_dc.get_run_value(self.ctrl_src, "inputA.krdg.value")
 
-    return bias_voltage, gain, fix_temperature_top, fix_temperature_bot
+    def get_fix_temperature_bot(self):
+        return self.run_dc.get_run_value(self.ctrl_src, "inputB.krdg.value")
diff --git a/src/xfel_calibrate/notebooks.py b/src/xfel_calibrate/notebooks.py
index 47085e19aebbb6b8c2bd83db6a2e501268f4685d..299722f1da4069fc15933f42eeb1774fc58a413c 100644
--- a/src/xfel_calibrate/notebooks.py
+++ b/src/xfel_calibrate/notebooks.py
@@ -120,8 +120,9 @@ notebooks = {
         },
         "CORRECT": {
             "notebook": "notebooks/pnCCD/Correct_pnCCD_NBC.ipynb",
-            "concurrency": {"parameter": None,
-                            "default concurrency": None,
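+            # Correct concurrently over sequence files: [-1] selects all
+            # sequences; balance_sequences distributes them across jobs.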
+            "concurrency": {"parameter": "sequences",
+                            "default concurrency": [-1],
+                            "use function": "balance_sequences",
                             "cluster cores": 32},
         },
     },