diff --git a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
index b23ef32bfcfde18bba57315e5f93fe120e7b07df..086968e2c6387690fa5b1b9ed524a136407584cf 100644
--- a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
+++ b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
@@ -30,7 +30,6 @@
    },
    "outputs": [],
    "source": [
-    "cluster_profile = \"noDB\"  # ipcluster profile to use\n",
     "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\"  # input folder, required\n",
     "out_folder = '/gpfs/exfel/data/scratch/setoodeh'  # output folder, required\n",
     "sequence = 0  # sequence file to use\n",
@@ -43,8 +42,8 @@
     "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n",
     "receiver_id = \"PNCCD_FMT-0\" # inset for receiver devices\n",
     "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5'  # the template to use to access data\n",
-    "h5path = '/INSTRUMENT/{}/CAL/{}:output/data/image/' # path in the HDF5 file the data is at\n",
-    "h5path_ctrl = '/CONTROL/{}/CTRL/TCTRL'\n",
+    "h5path = '{}/CAL/{}:output' # path in the HDF5 file the data is at\n",
+    "h5path_ctrl = '{}/CTRL/TCTRL'\n",
     "\n",
     "# Database access parameters:\n",
     "use_dir_creation_date = True  # use dir creation date as data production reference date\n",
@@ -68,9 +67,10 @@
     "bad_pixel_noise_sigma = 4.  # any pixel whose noise beyond this standard deviations is a bad pixel\n",
     "temp_limits = 5  # temperature limits in which calibration parameters are considered equal\n",
     "\n",
-    "run_parallel = True # for parallel computation\n",
-    "cpuCores = 40 # specifies the number of running cpu cores\n",
-    "operation_mode = ''  # Detector operation mode, optional"
+    "cpuCores = 40  # specifies the number of running cpu cores\n",
+    "operation_mode = ''  # Detector operation mode, optional\n",
+    "max_trains = 500  # Maximum number of trains to use for dark processing.\n",
+    "min_trains = 1  # Minimum number of trains required to proceed with dark processing."
    ]
   },
   {
@@ -91,9 +91,10 @@
     "\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
-    "import h5py\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
+    "import pasha as psh\n",
+    "from extra_data import RunDirectory\n",
     "\n",
     "%matplotlib inline\n",
     "import XFELDetAna.xfelprofiler as xprof\n",
@@ -107,41 +108,19 @@
     "    save_const_to_h5,\n",
     "    send_to_db,\n",
     ")\n",
-    "from iCalibrationDB import Conditions, Constants, Detectors, Versions\n",
+    "from iCalibrationDB import Conditions, Constants\n",
     "from iCalibrationDB.detectors import DetectorTypes\n",
     "from IPython.display import Markdown, display\n",
     "from prettytable import PrettyTable\n",
     "\n",
     "profiler = xprof.Profiler()\n",
     "profiler.disable()\n",
-    "from XFELDetAna.util import env\n",
     "\n",
-    "env.iprofile = cluster_profile\n",
     "from XFELDetAna import xfelpyanatools as xana\n",
     "from XFELDetAna import xfelpycaltools as xcal\n",
     "from XFELDetAna.plotting.util import prettyPlotting\n",
     "\n",
-    "prettyPlotting=True\n",
-    "from XFELDetAna.detectors.fastccd import readerh5 as fastccdreaderh5\n",
-    "from XFELDetAna.xfelreaders import ChunkReader"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:39.467334Z",
-     "start_time": "2018-12-06T10:54:39.427784Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "def nImagesOrLimit(nImages, limit):\n",
-    "    if limit == 0:\n",
-    "        return nImages\n",
-    "    else:\n",
-    "        return min(nImages, limit)"
+    "prettyPlotting=True"
    ]
   },
   {
@@ -178,9 +157,6 @@
     "print(f\"pnCCD size is: {pixels_x}x{pixels_y} pixels.\")\n",
     "\n",
     "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
-    "fp_name = path_template.format(run, karabo_da[0])\n",
-    "fp_path = '{}/{}'.format(ped_dir, fp_name)\n",
-    "filename = fp_path.format(sequence)\n",
     "h5path = h5path.format(karabo_id, receiver_id)\n",
     "\n",
     "# Output Folder Creation:\n",
@@ -207,8 +183,9 @@
     "print(f'Calibration database interface: {cal_db_interface}')\n",
     "print(f\"Sending constants to the calibration database: {db_output}\")\n",
     "print(f\"HDF5 path to data: {h5path}\")\n",
-    "print(f\"Reading data from: {filename}\")\n",
-    "print(f\"Run number: {run}\")"
+    "print(f\"Run number: {run}\")\n",
+    "\n",
+    "run_dc = RunDirectory(ped_dir)"
    ]
   },
   {
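
A minimal, self-contained sketch of the `extra_data` access pattern used here; the run path and source name are hypothetical placeholders, not taken from a real proposal:

```python
from extra_data import RunDirectory

# Hypothetical run directory and source name (karabo_id + receiver_id).
run_dc = RunDirectory("/gpfs/exfel/exp/SQS/202031/p900166/raw/r0034")
source = "SQS_NQS_PNCCD1MP/CAL/PNCCD_FMT-0:output"

# Keep only trains where the image data is actually present.
dc = run_dc.select(source, "data.image", require_all=True)
print(dc[source, "data.image"].shape)  # (n_trains, ...): train axis first
```
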
@@ -217,17 +194,28 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Extracting slow data:\n",
+    "# extract slow data\n",
     "if karabo_da_control:\n",
-    "    ctrl_fname = os.path.join(ped_dir, path_template.format(run, karabo_da_control)).format(sequence)\n",
+    "    mdl_path = f\"{karabo_id}/MDL/{'{}'}\"\n",
     "    ctrl_path = h5path_ctrl.format(karabo_id)\n",
-    "    mdl_ctrl_path = f\"/CONTROL/{karabo_id}/MDL/\"\n",
     "\n",
-    "    (bias_voltage, gain,\n",
-    "     fix_temperature_top,\n",
-    "     fix_temperature_bot) = extract_slow_data(karabo_id, karabo_da_control, ctrl_fname, ctrl_path,\n",
-    "                                              mdl_ctrl_path, bias_voltage, gain,\n",
-    "                                              fix_temperature_top, fix_temperature_bot)"
+    "    bias_voltage, gain, fix_temperature_top, fix_temperature_bot = extract_slow_data(  # noqa\n",
+    "        run_dc,\n",
+    "        ctrl_path,\n",
+    "        mdl_path,\n",
+    "        bias_voltage,\n",
+    "        gain,\n",
+    "        fix_temperature_top,\n",
+    "        fix_temperature_bot,\n",
+    "    )\n",
+    "\n",
+    "# Printing the Parameters Read from the Data File:\n",
+    "display(Markdown('### Detector Parameters'))\n",
+    "print(f\"Bias voltage is {bias_voltage:0.1f} V.\")\n",
+    "print(f\"Detector gain is set to 1/{int(gain)}.\")\n",
+    "print(f\"Detector integration time is set to {integration_time} ms\")\n",
+    "print(f\"Top pnCCD sensor is at temperature of {fix_temperature_top:0.2f} K\")\n",
+    "print(f\"Bottom pnCCD sensor is at temperature of {fix_temperature_bot:0.2f} K\")"
    ]
   },
   {
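
`extract_slow_data` receives the run's `DataCollection` plus format strings for the CTRL and MDL devices. A hedged sketch of reading individual slow values with `extra_data`; the key names are assumptions, not the actual pnCCD ones:

```python
from extra_data import RunDirectory

run_dc = RunDirectory("/gpfs/exfel/exp/SQS/202031/p900166/raw/r0034")  # hypothetical run
ctrl_source = "SQS_NQS_PNCCD1MP/CTRL/TCTRL"  # h5path_ctrl.format(karabo_id)

# get_run_value() returns a single value from the slow-data RUN section.
# "gain" and "biasVoltage" are hypothetical key names for illustration.
gain = run_dc.get_run_value(ctrl_source, "gain")
bias_voltage = run_dc.get_run_value(ctrl_source, "biasVoltage")
```
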
@@ -244,45 +232,22 @@
     "# Reading Parameters such as Detector Bias, Gain, etc. from the Data:\n",
     "memoryCells = 1 # pnCCD has 1 memory cell\n",
     "sensorSize = [pixels_x, pixels_y]\n",
-    "blockSize = [sensorSize[0]//2, sensorSize[1]//2]# sensor area will be analysed according to blocksize\n",
+    "blockSize = [sensorSize[0]//2, sensorSize[1]//2]  # sensor area will be analysed according to blocksize\n",
     "xcal.defaultBlockSize = blockSize\n",
-    "nImages = fastccdreaderh5.getDataSize(filename, h5path)[0] # specifies total number of images to proceed\n",
-    "nImages = nImagesOrLimit(nImages, number_dark_frames)\n",
-    "profile = False"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Printing the Parameters Read from the Data File:\n",
-    "display(Markdown('### Detector Parameters'))\n",
-    "print(f\"Bias voltage is {bias_voltage:0.2f} V.\")\n",
-    "print(f\"Detector gain is set to {gain}.\")\n",
-    "print(f\"Detector integration time is set to {integration_time} ms\")\n",
-    "print(f\"Top pnCCD sensor is at temperature of {fix_temperature_top:0.2f} K\")\n",
-    "print(f\"Bottom pnCCD sensor is at temperature of {fix_temperature_bot:0.2f} K\")\n",
-    "print(\"Number of dark images to analyze:\", nImages) "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:41.584031Z",
-     "start_time": "2018-12-06T10:54:41.578462Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "# Reading Files in Chunks:\n",
     "\n",
-    "# Chunk reader returns an iterator to access the data in the file within the ranges:\n",
-    "reader = ChunkReader(filename, fastccdreaderh5.readData, nImages, chunkSize, path=h5path, \n",
-    "                     pixels_x=pixels_x, pixels_y=pixels_y)"
+    "data_dc = run_dc.select(h5path, \"data.image\", require_all=True)\n",
+    "n_trains = data_dc[h5path, \"data.image\"].shape[0]\n",
+    "\n",
+    "if max_trains != 0:\n",
+    "    n_trains = max_trains\n",
+    "if n_trains < min_trains:\n",
+    "    raise ValueError(\n",
+    "        f\"Files {data_dc.files} consists of less than\"\n",
+    "        f\" the required number of {min_trains} trains to proceed with \"\n",
+    "        \"dark processing.\")\n",
+    "\n",
+    "profile = False\n",
+    "run_parallel = False # for parallel computation"
    ]
   },
   {
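
The guard above clamps the train count to what is actually available rather than overriding it; a tiny worked example with illustrative numbers:

```python
n_trains = 420                 # trains actually present in the run
max_trains, min_trains = 500, 1

if max_trains != 0:
    n_trains = min(n_trains, max_trains)  # clamp, don't overwrite
if n_trains < min_trains:
    raise ValueError(f"Need at least {min_trains} trains, found {n_trains}.")

print(n_trains)  # 420: later allocations must match the real frame count
```
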
@@ -299,8 +264,12 @@
     "# Calculators:\n",
     "\n",
     "# noiseCal is a noise map calculator, which internally also produces a per-pixel mean map, i.e., an offset map:\n",
-    "noiseCal = xcal.NoiseCalculator(sensorSize, memoryCells, cores=cpuCores, blockSize=blockSize,\n",
-    "                                runParallel=run_parallel)"
+    "noiseCal = xcal.NoiseCalculator(\n",
+    "    sensorSize, memoryCells,\n",
+    "    cores=cpuCores,\n",
+    "    blockSize=blockSize,\n",
+    "    parallel=run_parallel,\n",
+    ")"
    ]
   },
   {
@@ -323,30 +292,12 @@
    },
    "outputs": [],
    "source": [
-    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
-    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
-    "chunkSize_new = 0 # See below\n",
-    "\n",
-    "for data in reader.readChunks():\n",
-    "    data = data.astype(np.float32)\n",
-    "    dx = np.count_nonzero(data, axis=(0, 1)) \n",
-    "    data = data[:,:,dx != 0] # Getting rid of empty frames\n",
-    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will \n",
-    "    # temporarily change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
-    "    if data.shape[2] < chunkSize:\n",
-    "        chunkSize_new = data.shape[2]\n",
-    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
-    "              .format(chunkSize_new))\n",
-    "        images = images + chunkSize_new\n",
-    "        counter2 += 1 \n",
-    "    else:\n",
-    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
-    "        counter1 += 1\n",
-    "\n",
-    "    noiseCal.fill(data) # Filling the histogram calculator with data\n",
-    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
-    "\n",
-    "print('A total number of {} images are processed.'.format(images))"
+    "%%time\n",
+    "data = data_dc.select_trains(np.s_[:n_trains])[h5path, \"data.image\"].ndarray()\n",
+    "data = np.moveaxis(data, 0, 2).astype(np.float32)\n",
+    "noiseCal.fill(data) # Filling the histogram calculator with data\n",
+    "\n",
+    "print(f'A total number of {n_trains} images are processed.')"
    ]
   },
   {
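
`extra_data` returns image stacks with the train axis first, while the XFELDetAna calculators consume `(y, x, frames)` stacks, as the removed ChunkReader code did. A small sketch of the reordering with illustrative shapes:

```python
import numpy as np

stack = np.zeros((500, 1024, 1024), dtype=np.uint16)  # as .ndarray() returns it
stack = np.moveaxis(stack, 0, 2).astype(np.float32)
print(stack.shape)  # (1024, 1024, 500): frame index now on axis 2
```
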
@@ -484,8 +435,14 @@
    "source": [
     "# Offset Correction:\n",
     "\n",
-    "offsetCorrection = xcal.OffsetCorrection(sensorSize, offsetMap, nCells = memoryCells, cores=cpuCores, gains=None,\n",
-    "                                         runParallel=run_parallel, blockSize=blockSize)\n",
+    "offsetCorrection = xcal.OffsetCorrection(\n",
+    "    sensorSize, offsetMap,\n",
+    "    nCells=memoryCells,\n",
+    "    cores=cpuCores,\n",
+    "    gains=None,\n",
+    "    parallel=run_parallel,\n",
+    "    blockSize=blockSize,\n",
+    ")\n",
     "\n",
     "# Common Mode Correction:\n",
     "# In this method, the median of all (assuming stride = 1) pixels that are read out at the same time along a column \n",
@@ -495,12 +452,15 @@
     "# in a column per quadrant is smaller than the value set for minFrac parameter for a particular column, that column \n",
     "# will be ignored for calculation of common mode values and that column is not corrected for common mode.\n",
     "# minFrac = 0 means no column is ignored except those containing nan values (bad pixels):\n",
-    "cmCorrection = xcal.CommonModeCorrection(sensorSize,\n",
-    "                                         commonModeBlockSize,\n",
-    "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=1,\n",
-    "                                         noiseMap=noiseMap.astype(np.float32), minFrac=0)\n",
-    "\n",
-    "cmCorrection.debug()"
+    "cmCorrection = xcal.CommonModeCorrection(\n",
+    "    sensorSize,\n",
+    "    commonModeBlockSize,\n",
+    "    commonModeAxis,\n",
+    "    parallel=run_parallel,\n",
+    "    dType=np.float32, stride=1,\n",
+    "    noiseMap=noiseMap.astype(np.float32),\n",
+    "    minFrac=0,\n",
+    ")"
    ]
   },
   {
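
For intuition, a naive NumPy toy of the column-wise common-mode idea described in the comments above; this is not the `xcal.CommonModeCorrection` implementation (no stride, `minFrac`, or quadrant handling):

```python
import numpy as np

# Subtract, per column, the median of the pixels read out together along
# that column; NaNs (bad pixels) are ignored by nanmedian.
frame = np.random.normal(0.0, 10.0, (1024, 1024)).astype(np.float32)
common_mode = np.nanmedian(frame, axis=0, keepdims=True)  # one value per column
frame -= common_mode
```
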
@@ -515,10 +475,10 @@
     "\n",
     "# For offset corrected data:\n",
     "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=bins, range=bin_range, memoryCells=memoryCells,\n",
-    "                                            cores=cpuCores, gains=None, blockSize=blockSize)\n",
+    "                                            cores=cpuCores, parallel=run_parallel, gains=None, blockSize=blockSize)\n",
     "# For common mode corrected data:\n",
     "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=bins, range=bin_range, memoryCells=memoryCells,\n",
-    "                                              cores=cpuCores, gains=None, blockSize=blockSize)"
+    "                                              cores=cpuCores, parallel=run_parallel, gains=None, blockSize=blockSize)"
    ]
   },
   {
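
The histogram calculators follow the fill/get pattern used throughout the notebook: `fill()` accumulates frames, `get()` returns the histogram, unpacked later as (counts, edges, centers, stats). A sketch with illustrative bin settings (the notebook takes `bins` and `bin_range` from its parameter cell):

```python
import numpy as np
from XFELDetAna import xfelpycaltools as xcal

hist_cal = xcal.HistogramCalculator(
    [1024, 1024], bins=1000, range=[-50, 50], memoryCells=1,
    cores=40, parallel=False, gains=None, blockSize=[512, 512])

hist_cal.fill(np.zeros((1024, 1024, 10), dtype=np.float32))
counts, edges, centers, stats = hist_cal.get()
```
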
@@ -536,37 +496,34 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
-    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
-    "chunkSize_new = 0 # See below\n",
-    "\n",
-    "for data in reader.readChunks():\n",
-    "    data = data.astype(np.float32)\n",
-    "    dx = np.count_nonzero(data, axis=(0, 1))\n",
-    "    data = data[:,:,dx != 0]\n",
+    "%%time\n",
+    "def correct_image(wid, idx, d):\n",
+    "    d -= offsetMap.data[..., 0] # Offset correction\n",
+    "\n",
+    "    #offset_corr_data = copy.copy(data) # I am copying this so that I can have access to it in the table below\n",
+    "    histCalCorrected.fill(d)\n",
+    "\n",
+    "    offset_corr_data[..., idx] = d\n",
+    "    \n",
+    "    cellTable=np.zeros(1, np.int32) # Common mode correction\n",
     "    \n",
-    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will \n",
-    "    # temporarily change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
-    "    if data.shape[2] < chunkSize:\n",
-    "        chunkSize_new = data.shape[2]\n",
-    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
-    "              .format(chunkSize_new))\n",
-    "        images = images + chunkSize_new\n",
-    "        counter2 += 1 \n",
-    "    else:\n",
-    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
-    "        counter1 += 1\n",
-    "        \n",
-    "    data -= offsetMap.data # Offset correction\n",
-    "    offset_corr_data = copy.copy(data) # I am copying this so that I can have access to it in the table below\n",
-    "    histCalCorrected.fill(data)\n",
-    "    cellTable=np.zeros(data.shape[2], np.int32) # Common mode correction\n",
-    "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable) \n",
-    "    histCalCMCorrected.fill(data)\n",
-    "    noiseCal.fill(data)  # Filling calculators with data\n",
-    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
-    "\n",
-    "print('A total number of {} images are processed.'.format(images))\n",
+    "    d = cmCorrection.correct(d.astype(np.float32), cellTable=cellTable) \n",
+    "    \n",
+    "    histCalCMCorrected.fill(d)\n",
+    "    \n",
+    "    noiseCal.fill(d)  # Filling calculators with data\n",
+    "    \n",
+    "    corr_data[..., idx] = np.squeeze(d)\n",
+    "\n",
+    "context = psh.context.ThreadContext(num_workers=10)\n",
+    "\n",
+    "corr_data = context.alloc(\n",
+    "    shape=(sensorSize[0], sensorSize[1], n_trains), dtype=np.float32)\n",
+    "offset_corr_data = context.alloc(\n",
+    "    shape=(sensorSize[0], sensorSize[1], n_trains), dtype=np.float32)\n",
+    "\n",
+    "context.map(correct_image, np.moveaxis(copy.copy(data), 2, 0))\n",
+    "\n",
     "print(\"Offset and common mode corrections are applied.\")"
    ]
   },
@@ -630,12 +587,12 @@
     "t0.title = \"Comparison of the First Round of Corrections - Bad Pixels Not Excluded\"\n",
     "t0.field_names = [\"Dark Pedestal After Offset Correction\", \"Dark Pedestal After Offset and Common Mode Corrections\"]\n",
     "t0.add_row([\"Mean: {:0.3f} ADU\".format(np.mean(offset_corr_data)), \"Mean: {:0.3f} ADU\"\n",
-    "            .format(np.mean(data))])\n",
+    "            .format(np.mean(corr_data))])\n",
     "t0.add_row([\"Median: {:0.3f} ADU\".format(np.median(offset_corr_data)), \"Median: {:0.3f} ADU\"\n",
-    "            .format(np.median(data))])\n",
+    "            .format(np.median(corr_data))])\n",
     "t0.add_row([\"Standard Deviation: {:0.3f} ADU\".format(np.std(offset_corr_data)), \n",
     "            \"Standard Deviation: {:0.3f} ADU\"\n",
-    "            .format(np.std(data))])\n",
+    "            .format(np.std(corr_data))])\n",
     "print(t0,'\\n')"
    ]
   },
@@ -756,7 +713,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "event_threshold = sigmaNoise*np.median(noiseMapCM) # for exclusion of possible cosmic ray events\n",
+    "event_threshold = sigmaNoise * np.median(noiseMapCM) # for exclusion of possible cosmic ray events\n",
     "noiseCal.setBadPixelMask(bad_pixels != 0) # setting bad pixels map for the noise calculator"
    ]
   },
@@ -766,11 +723,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "cmCorrection = xcal.CommonModeCorrection(sensorSize,\n",
-    "                                         commonModeBlockSize,\n",
-    "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=1,\n",
-    "                                         noiseMap=noiseMapCM.astype(np.float32), minFrac=0)\n",
-    "cmCorrection.debug()"
+    "cmCorrection = xcal.CommonModeCorrection(\n",
+    "    sensorSize,\n",
+    "    commonModeBlockSize,\n",
+    "    commonModeAxis, parallel=run_parallel, dType=np.float32, stride=1,\n",
+    "    noiseMap=noiseMapCM.astype(np.float32), minFrac=0,\n",
+    ")"
    ]
   },
   {
@@ -779,46 +737,43 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
-    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
-    "chunkSize_new = 0 # See below\n",
-    "\n",
-    "for data in reader.readChunks():\n",
-    "    data = data.astype(np.float32)\n",
-    "    dx = np.count_nonzero(data, axis=(0, 1))\n",
-    "    data = data[:,:,dx != 0]\n",
-    "    data_mask = np.repeat(bad_pixels, data.shape[2], axis=2) # Converting bad_pixels to the same shape as the data\n",
-    "    data[data_mask != 0] = np.nan # masking data for bad pixels and equating the values to np.nan\n",
-    "    \n",
-    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will \n",
-    "    # temporarily change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
-    "    if data.shape[2] < chunkSize:\n",
-    "        chunkSize_new = data.shape[2]\n",
-    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
-    "              .format(chunkSize_new))\n",
-    "        images = images + chunkSize_new\n",
-    "        counter2 += 1 \n",
-    "    else:\n",
-    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
-    "        counter1 += 1\n",
-    "    \n",
-    "    data_copy = offsetCorrection.correct(copy.copy(data))\n",
-    "    cellTable=np.zeros(data_copy.shape[2], np.int32)\n",
-    "    data_copy = cmCorrection.correct(data_copy.astype(np.float32), cellTable=cellTable)\n",
-    "    data[data_copy > event_threshold] = np.nan # discarding events caused by cosmic rays\n",
-    "    #data = np.ma.MaskedArray(data, np.isnan(data), fill_value=np.nan) # masking cosmics,default fill_value = 1e+20\n",
+    "cellTable = np.zeros(1, np.int32)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%time\n",
+    "def correct_image(wid, idx, d):\n",
     "    \n",
-    "    data -= offsetMap.data # Offset correction\n",
-    "    offset_corr_data2 = copy.copy(data) # I am copying this so that I can have access to it in the table below\n",
-    "    histCalCorrected.fill(data)\n",
-    "    cellTable=np.zeros(data.shape[2], np.int32) # Common mode correction\n",
-    "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable) \n",
-    "    histCalCMCorrected.fill(data)\n",
-    "    noiseCal.fill(data)  # Filling calculators with data\n",
-    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
+    "    d[np.squeeze(bad_pixels) != 0] = np.nan # masking data for bad pixels and equating the values to np.nan\n",
+    "    d_off = offsetCorrection.correct(copy.copy(d)) # Offset correction\n",
+    "\n",
+    "    d_off = cmCorrection.correct(d_off.astype(np.float32), cellTable=cellTable)\n",
+    "    d[np.squeeze(d_off) > event_threshold] = np.nan # discarding events caused by cosmic rays\n",
+    "    d -= offsetMap.data[..., 0] # Offset correction\n",
+    "    offset_corr_data2[..., idx] = d # I am copying this so that I can have access to it in the table below\n",
+    "    histCalCorrected.fill(d)\n",
+    "\n",
+    "    d = cmCorrection.correct(d, cellTable=cellTable)\n",
+    "    histCalCMCorrected.fill(d)\n",
+    "    noiseCal.fill(d)  # Filling calculators with data\n",
+    "    corr_data[..., idx] = np.squeeze(d)\n",
+    "\n",
+    "context = psh.context.ThreadContext(num_workers=10)\n",
+    "\n",
+    "corr_data = context.alloc(\n",
+    "    shape=(sensorSize[0], sensorSize[1], n_trains), dtype=np.float32)\n",
+    "offset_corr_data2 = context.alloc(\n",
+    "    shape=(sensorSize[0], sensorSize[1], n_trains), dtype=np.float32)\n",
+    "\n",
+    "context.map(correct_image, np.moveaxis(copy.copy(data), 2, 0))\n",
     "\n",
     "print(\"Final iteration is Performed.\")\n",
-    "print('A total number of {} images are processed.'.format(images))"
+    "print(f\"A total number of {n_trains} images are processed.\")"
    ]
   },
   {
@@ -827,7 +782,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "noiseMapCM_2nd = noiseCal.get().filled(np.nan) # the masked pixels are filled with nans\n",
+    "noiseMapCM_2nd = noiseCal.get().filled(np.nan)  # the masked pixels are filled with nans\n",
     "ho2, eo2, co2, so2 = histCalCorrected.get()\n",
     "hCM2, eCM2, cCM2, sCM2 = histCalCMCorrected.get()"
    ]
@@ -858,7 +813,7 @@
     "     'drawstyle': 'steps-post',\n",
     "     'color': 'red',\n",
     "     'ecolor': 'crimson',\n",
-    "     'label': 'Offset and Common Mode Corrected Signal (BP Incl.)' \n",
+    "     'label': 'Offset and Common Mode Corrected Signal (BP Incl.)'\n",
     "     },\n",
     "    {'x': co2,\n",
     "     'y': ho2,\n",
@@ -873,8 +828,8 @@
     "     'label': 'Offset and Common Mode Corrected Signal (BP Excl.)'\n",
     "     }]\n",
     "\n",
-    "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'ADU', \n",
-    "                      y_label=\"Counts (logarithmic scale)\", y_log=True, x_range = bin_range, \n",
+    "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'ADU',\n",
+    "                      y_label=\"Counts (logarithmic scale)\", y_log=True, x_range = bin_range,\n",
     "                      y_range = (0.02, 1e8),\n",
     "                      legend='top-right-frame-1col', title = 'Comparison of Corrections')\n",
     "\n",
@@ -882,11 +837,11 @@
     "t0.title = \"Comparison of the Second Round of Corrections - Bad Pixels Excluded\"\n",
     "t0.field_names = [\"Dark Pedestal After Offset Correction\", \"Dark Pedestal After Offset and Common Mode Corrections\"]\n",
     "t0.add_row([\"Mean: {:0.3f} ADU\".format(np.nanmean(offset_corr_data2)), \"Mean: {:0.3f} ADU\"\n",
-    "            .format(np.nanmean(data))])\n",
+    "            .format(np.nanmean(corr_data))])\n",
     "t0.add_row([\"Median: {:0.3f} ADU\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} ADU\"\n",
-    "            .format(np.nanmedian(data))])\n",
-    "t0.add_row([\"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(offset_corr_data2)), \n",
-    "            \"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(data))])\n",
+    "            .format(np.nanmedian(corr_data))])\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(offset_corr_data2)),\n",
+    "            \"Standard Deviation: {:0.3f} ADU\".format(np.nanstd(corr_data))])\n",
     "print(t0,'\\n')"
    ]
   },
@@ -1015,14 +970,9 @@
     "            parm.lower_deviation = temp_limits\n",
     "            parm.upper_deviation = temp_limits\n",
     "\n",
-    "    # This should be used in case of running notebook \n",
-    "    # by a different method other than myMDC which already\n",
-    "    # sends CalCat info.\n",
-    "    # TODO: Set db_module to \"\" by default in the first cell\n",
-    "    if not db_module:\n",
-    "        db_module = get_pdu_from_db(karabo_id, karabo_da, const,\n",
-    "                                    condition, cal_db_interface,\n",
-    "                                    snapshot_at=creation_time)[0]\n",
+    "    db_module = get_pdu_from_db(karabo_id, karabo_da, const,\n",
+    "                                condition, cal_db_interface,\n",
+    "                                snapshot_at=creation_time)[0]\n",
     "\n",
     "    if db_output:\n",
     "        md = send_to_db(db_module, karabo_id, const, condition,\n",