diff --git a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
index 8cc5d8762890f38c27dc5e14dcc9c3cf24880d19..2ebcc78c267bfcb45bac3b83db37af0446bba264 100644
--- a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
+++ b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb
@@ -6,11 +6,17 @@
    "source": [
     "# pnCCD Dark Characterization\n",
     "\n",
-    "Author: Kiana Setoodehnia, DET Group, Version 2.0\n",
+    "Author: DET Group, modified by Kiana Setoodehnia, Version: 2.0\n",
     "\n",
-    "The following notebook provides dark image analysis of the pnCCD detector.\n",
+    "The following notebook provides dark image analysis of the pnCCD detector. Dark characterization evaluates offset and noise of the detector and gives information about bad pixels. \n",
     "\n",
-    "Dark characterization evaluates offset and noise of the detector and gives information about bad pixels. The noise map is corrected for common mode. The resulting map together with the offset and bad pixels maps are sent to the database and are also saved as .h5 files to a local path for a later use."
+    "On the first iteration, the offset and noise maps are generated. Initial bad pixels map is obtained based on the offset and initial noise maps. \n",
+    "\n",
+    "On the second iteration, the noise map is corrected for common mode. A second bad pixel map is generated based on the offset map and offset-and-common-mode-corrected noise map. Then, the hole in the center of the CCD is added to the second bad pixel map.\n",
+    "\n",
+    "On the third and final iteration, the pixels that show up on the abovementioned bad pixels map are masked. Possible events due to cosmic rays are found and masked. The data are then again offset and common mode corrected and a new final noise and bad pixels maps are generated.\n",
+    "\n",
+    "These latter resulting maps together with the offset map are saved as .h5 files to a local path for a later use. These dark constants are not automatically sent to the database."
    ]
   },
   {
@@ -25,10 +31,10 @@
    "outputs": [],
    "source": [
     "cluster_profile = \"noDB\"  # ipcluster profile to use\n",
-    "in_folder = \"/gpfs/exfel/exp/SQS/201921/p002430/raw/\"  # input folder, required\n",
-    "out_folder = 'gpfs/exfel/data/scratch/karnem/test/'  # output folder, required\n",
+    "in_folder = \"/gpfs/exfel/exp/SQS/201930/p900075/raw\"  # input folder, required\n",
+    "out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/pnccd'  # output folder, required\n",
     "sequence = 0  # sequence file to use\n",
-    "run = 745  # which run to read data from, required\n",
+    "run = 364 # which run to read data from, required\n",
     "\n",
     "karabo_da = 'PNCCD01' # data aggregators\n",
     "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n",
@@ -36,29 +42,28 @@
     "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5'  # the template to use to access data\n",
     "h5path = '/INSTRUMENT/{}/CAL/{}:output/data/image/' # path in the HDF5 file the data is at\n",
     "\n",
+    "# for database time derivation:\n",
     "use_dir_creation_date = True  # use dir creation date as data production reference date\n",
     "cal_db_interface = \"tcp://max-exfl016:8021\"  # calibration DB interface to use\n",
     "cal_db_timeout = 300000 # timeout on caldb requests\n",
     "db_output = False # if True, the notebook sends dark constants to the calibration database\n",
     "local_output = True # if True, the notebook saves dark constants locally\n",
     "\n",
-    "\n",
     "number_dark_frames = 0  # number of images to be used, if set to 0 all available images are used\n",
     "chunkSize = 100 # number of images to read per chunk\n",
-    "fix_temperature = 233.  # fix temperature in K, set to -1 to use value from slow data\n",
-    "gain = 0  # the detector's gain setting, only 0 is currently implemented\n",
+    "fix_temperature = 233.  # fix temperature in K, set to 0. to use value from slow data\n",
+    "gain = 1  # the detector's gain setting, only 1 and 64 are available.\n",
     "bias_voltage = 300  # detector's bias voltage\n",
     "integration_time = 70  # detector's integration time\n",
     "commonModeAxis = 0 # axis along which common mode will be calculated (0: along rows, 1: along columns)\n",
-    "sigmaNoise = 10.  # pixels whose signal value exceeds sigmaNoise*noise value in that pixel will be masked\n",
+    "commonModeBlockSize = [512, 512] # size of the detector in pixels for common mode calculations\n",
+    "sigmaNoise = 10.  # pixels whose signal value exceeds sigmaNoise*noise will be considered as cosmics and are masked\n",
     "bad_pixel_offset_sigma = 5.  # any pixel whose offset is beyond 5 standard deviations, is a bad pixel\n",
     "bad_pixel_noise_sigma = 5.  # any pixel whose noise is beyond 5 standard deviations, is a bad pixel\n",
-    "run_parallel = True # for parallel computation \n",
-    "cpuCores = 40 # specifies the number of running cpu cores\n",
+    "temp_limits = 5  # temperature limits in which calibration parameters are considered equal\n",
     "\n",
-    "# for database time derivation:\n",
-    "temp_limits = 5  # temperature limits in which to consider calibration parameters equal\n",
-    "multi_iteration = False  # use multiple iterations"
+    "run_parallel = True # for parallel computation\n",
+    "cpuCores = 40 # specifies the number of running cpu cores"
    ]
   },
   {
@@ -137,7 +142,7 @@
    "source": [
     "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
     "file_loc = 'Proposal: {}, Run: {}'.format(proposal, run)\n",
-    "print(file_loc)"
+    "print(\"File Location:\", file_loc)"
    ]
   },
   {
@@ -154,9 +159,9 @@
     "# Calibration Database Settings, and Some Initial Run Parameters & Paths:\n",
     "\n",
     "display(Markdown('### Initial Settings'))\n",
-    "x = 1024 # rows of the FastCCD to analyze in FS mode \n",
-    "y = 1024 # columns of the FastCCD to analyze in FS mode \n",
-    "print(\"pnCCD size is: {}x{} pixels.\".format(x, y))\n",
+    "pixels_x = 1024 # rows of the FastCCD to analyze in FS mode \n",
+    "pixels_y = 1024 # columns of the FastCCD to analyze in FS mode \n",
+    "print(f\"pnCCD size is: {pixels_x}x{pixels_y} pixels.\")\n",
     "\n",
     "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
     "fp_name = path_template.format(run, karabo_da)\n",
@@ -175,7 +180,7 @@
     "print(\"Reading data from: {}\".format(filename))\n",
     "print(\"Run number: {}\".format(run))\n",
     "if creation_time:\n",
-    "    print(\"Creation time: {}\".format(creation_time.isoformat()))"
+    "    print(\"Creation time: {}\".format(creation_time))"
    ]
   },
   {
@@ -192,7 +197,7 @@
     "# Reading Parameters such as Detector Bias, Gain, etc. from the Data:\n",
     "\n",
     "memoryCells = 1 # pnCCD has 1 memory cell\n",
-    "sensorSize = [x, y]\n",
+    "sensorSize = [pixels_x, pixels_y]\n",
     "blockSize = [sensorSize[0]//2, sensorSize[1]//2]# sensor area will be analysed according to blocksize\n",
     "xcal.defaultBlockSize = blockSize\n",
     "nImages = fastccdreaderh5.getDataSize(filename, h5path)[0] # specifies total number of images to proceed\n",
@@ -212,12 +217,15 @@
     "print(\"Bias voltage is {} V.\".format(bias_voltage))\n",
     "print(\"Detector gain is set to {}.\".format(gain))\n",
     "print(\"Detector integration time is set to {} ms\".format(integration_time)) \n",
-    " \n",
+    "\n",
     "if fix_temperature != 0.:\n",
-    "    print(\"Using a fixed temperature of {} K\".format(fix_temperature))\n",
+    "    print(f\"Using a fixed temperature of {fix_temperature} K\")\n",
     "    temperature_k = fix_temperature\n",
     "else:\n",
     "    print(\"Temperature is not fixed.\")\n",
+    "    #TODO: remove this line after properly saving the temperature in control data.\n",
+    "    temperature_k = 233.\n",
+    "    print(f\"Using a fixed temperature of {temperature_k} K\")\n",
     "    \n",
     "print(\"Number of dark images to analyze:\", nImages) "
    ]
@@ -236,8 +244,8 @@
     "# Reading Files in Chunks:\n",
     "\n",
     "# Chunk reader returns an iterator to access the data in the file within the ranges:\n",
-    "reader = ChunkReader(filename, fastccdreaderh5.readData, nImages, chunkSize, path = h5path, \n",
-    "                     pixels_x = sensorSize[0], pixels_y = sensorSize[1],)"
+    "reader = ChunkReader(filename, fastccdreaderh5.readData, nImages, chunkSize, path=h5path, \n",
+    "                     pixels_x=pixels_x, pixels_y=pixels_y)"
    ]
   },
   {
@@ -264,7 +272,7 @@
    "source": [
     "### First Iteration\n",
     "\n",
-    "Characterization of dark images with purpose to create dark maps (offset, noise and bad pixel maps) is an iterative process. Firstly, initial offset and noise maps are produced from the raw dark data."
+    "Characterization of dark images with purpose to create dark maps (offset, noise and bad pixel maps) is an iterative process. The initial offset and noise maps are produced from the raw dark data."
    ]
   },
   {
@@ -327,7 +335,7 @@
    "source": [
     "### Offset and Noise Maps prior to Common Mode Correction\n",
     "\n",
-    "In the following, the histogram of the pnCCD offset, pnCCD offset map, as well as the initial uncorrected noise map are plotted:"
+    "In the following, the histograms of the pnCCD offset and initial noise, and the heat maps of pnCCD offset, as well as the initial uncorrected noise are plotted."
    ]
   },
   {
@@ -343,17 +351,17 @@
    "outputs": [],
    "source": [
     "#************** OFFSET MAP HISTOGRAM ***********#\n",
-    "ho, co = np.histogram(offsetMap.flatten(), bins=700) # ho = offset histogram; co = offset bin centers\n",
+    "ho, co = np.histogram(offsetMap.flatten(), bins=2000) # ho = offset histogram; co = offset bin centers\n",
     "\n",
     "do = {'x': co[:-1],\n",
     "     'y': ho,\n",
-    "     'y_err': np.sqrt(ho[:]),\n",
+    "     #'y_err': np.sqrt(ho[:]), Markus thinks these errors bars are not correctly calculated!\n",
     "     'drawstyle': 'bars',\n",
     "     'color': 'cornflowerblue',\n",
-    "     'label': 'Raw Signal (ADU)'\n",
+    "     'label': 'Raw Pedestal (ADU)'\n",
     "     }\n",
     "                      \n",
-    "fig = xana.simplePlot(do, figsize='1col', aspect=1, x_label = 'Raw Signal (ADU)', y_label=\"Counts\", \n",
+    "fig = xana.simplePlot(do, figsize='1col', aspect=1, x_label = 'Raw Pedestal (ADU)', y_label=\"Counts\", \n",
     "                      title = 'Offset Histogram', x_range=(0, np.nanmax(offsetMap)), y_log=True)\n",
     "\n",
     "# Calculating mean, median and standard deviation for the above histogram:\n",
@@ -364,7 +372,7 @@
     "\n",
     "# Table of statistics on raw signal:\n",
     "t0 = PrettyTable()\n",
-    "t0.title = \"Statistics on Raw Signal (Offset)\"\n",
+    "t0.title = \"Statistics on Raw Pedestal (Offset)\"\n",
     "t0.field_names = [\"Mean\", \"Standard Deviation\"]\n",
     "t0.add_row([\"{:0.3f} (ADU)\".format(mean), \"{:0.3f} (ADU)\".format(std)])\n",
     "print(t0,'\\n')\n",
@@ -374,25 +382,25 @@
     "\n",
     "dn = {'x': cn[:-1],\n",
     "     'y': hn,\n",
-    "     'y_err': np.sqrt(hn[:]),\n",
+    "     #'y_err': np.sqrt(hn[:]),\n",
     "     'drawstyle': 'bars',\n",
     "     'color': 'cornflowerblue',\n",
     "     'label': 'Noise (ADU)'\n",
     "     }\n",
     "\n",
-    "fig = xana.simplePlot(dn, figsize='1col', aspect=1, x_label = 'Noise (ADU)', y_label=\"Counts\", \n",
+    "fig = xana.simplePlot(dn, figsize='1col', aspect=1, x_label = 'Raw Noise (ADU)', y_label=\"Counts\", \n",
     "                      title = 'Noise Histogram', y_log=True, x_range=(0, 600))\n",
     "\n",
     "\n",
     "#************** HEAT MAPS *******************#\n",
     "fig = xana.heatmapPlot(offsetMap[:,:,0], x_label='Column Number', y_label='Row Number',  aspect=1,\n",
-    "                       x_range=(0, y), y_range=(0, x), vmin=6000, vmax=14500, lut_label='Offset (ADU)', \n",
-    "                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)', title = 'OffsetMap')\n",
+    "                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=6000, vmax=14500, lut_label='Offset (ADU)', \n",
+    "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', title = 'Offset Map')\n",
     "\n",
     "fig = xana.heatmapPlot(noiseMap[:,:,0], x_label='Column Number', y_label='Row Number', aspect=1,\n",
-    "                       lut_label='Uncorrected Noise (ADU)', x_range=(0, y),\n",
-    "                       y_range=(0, x), vmax=2*np.mean(noiseMap), panel_x_label='Columns Stat (ADU)', \n",
-    "                       panel_y_label='Rows Stat (ADU)', title = 'Uncorrected NoiseMap')"
+    "                       lut_label='Noise (ADU)', x_range=(0, pixels_y),\n",
+    "                       y_range=(0, pixels_x), vmax=2*np.mean(noiseMap), panel_x_label='Row Stat (ADU)', \n",
+    "                       panel_y_label='Column Stat (ADU)', title = 'Uncorrected NoiseMap')"
    ]
   },
   {
@@ -401,7 +409,7 @@
    "source": [
     "### Initial BadPixelMap\n",
     "\n",
-    "This is generated based on the offset and uncorrected noise maps:"
+    "This is generated based on the offset and uncorrected noise maps."
    ]
   },
   {
@@ -424,8 +432,10 @@
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Bad Pixel Value (ADU)',\n",
-    "                       x_range=(0, y),\n",
-    "                       y_range=(0, x), vmin=0, vmax=32)"
+    "                       x_range=(0, pixels_y),\n",
+    "                       y_range=(0, pixels_x), vmin=0, vmax=32,\n",
+    "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
+    "                       title = 'Initial Bad Pixels Map')"
    ]
   },
   {
@@ -434,10 +444,17 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "# Offset Correction:\n",
+    "\n",
+    "offsetCorrection = xcal.OffsetCorrection(sensorSize, offsetMap, nCells = memoryCells, cores=cpuCores, gains=None,\n",
+    "                                         runParallel=run_parallel, blockSize=blockSize)\n",
+    "\n",
+    "\n",
     "# Common Mode Correction:\n",
     "# In this method, the median of all pixels that are read out at the same time along a row is subtracted from the\n",
     "# signal in each pixel:\n",
-    "cmCorrection = xcal.CommonModeCorrection(sensorSize, [data.shape[0]//2, data.shape[1]//2],\n",
+    "cmCorrection = xcal.CommonModeCorrection(sensorSize,\n",
+    "                                         commonModeBlockSize,\n",
     "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=1,\n",
     "                                         noiseMap=noiseMap.astype(np.float32), minFrac=0)\n",
     "\n",
@@ -451,16 +468,15 @@
    "outputs": [],
    "source": [
     "# Histogram Calculators:\n",
+    "# The number of bins and bin ranges are based on the offset histogram. We set 1 ADU per bin and we consider some\n",
+    "# negative bin to ensure the offset and common mode corrections actually move the signal to zero:\n",
     "\n",
     "# For offset corrected data:\n",
-    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=400, range=[-200, 200], memoryCells=memoryCells,\n",
+    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=1100, range=[-50, 1050], memoryCells=memoryCells,\n",
     "                                            cores=cpuCores, gains=None, blockSize=blockSize)\n",
     "# For common mode corrected data:\n",
-    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=400, range=[-200, 200], memoryCells=memoryCells,\n",
-    "                                              cores=cpuCores, gains=None, blockSize=blockSize)\n",
-    "\n",
-    "noiseCalCM = xcal.NoiseCalculator(sensorSize, memoryCells, cores=cpuCores, blockSize=blockSize,\n",
-    "                                  runParallel=run_parallel)"
+    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=1100, range=[-50, 1050], memoryCells=memoryCells,\n",
+    "                                              cores=cpuCores, gains=None, blockSize=blockSize)"
    ]
   },
   {
@@ -469,7 +485,7 @@
    "source": [
     "### Second Iteration\n",
     "\n",
-    "During the second iteration, the data are offset and common mode corrected to produce a common mode corrected noise map. The offset and common mode corrections are calculated by subtracting out the median of all pixels that are read out at the same time along a row."
+    "During the second iteration, the data are offset and common mode corrected to produce an offset-and-common-mode-corrected noise map. The common mode correction is calculated by subtracting out the median of all pixels that are read out at the same time along a row."
    ]
   },
   {
@@ -505,7 +521,7 @@
     "    cellTable=np.zeros(data.shape[2], np.int32) # Common mode correction\n",
     "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable) \n",
     "    histCalCMCorrected.fill(data)\n",
-    "    noiseCalCM.fill(data)  # Filling calculators with data\n",
+    "    noiseCal.fill(data)  # Filling calculators with data\n",
     "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
     "\n",
     "print('A total number of {} images are processed.'.format(images))\n",
@@ -518,7 +534,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "noiseMapCM = noiseCalCM.get()  # Producing common mode corrected noise map\n",
+    "noiseMapCM = noiseCal.get() # Producing common mode corrected noise map\n",
     "ho, eo, co, so = histCalCorrected.get()\n",
     "hCM, eCM, cCM, sCM = histCalCMCorrected.get()"
    ]
@@ -542,7 +558,7 @@
    "source": [
     "### Signal after Offset and Common Mode Corrections\n",
     "\n",
-    "Here, the offset corrected signal is compared to the common-mode corrected signal (in the form of binned histograms):  "
+    "Here, the offset corrected signal is compared to the common-mode corrected signal (in the form of binned histograms)."
    ]
   },
   {
@@ -553,27 +569,26 @@
    "source": [
     "do = [{'x': co,\n",
     "     'y': ho,\n",
-    "     'y_err': np.sqrt(ho[:]),\n",
-    "     'drawstyle': 'steps-mid',\n",
+    "     'drawstyle': 'steps-post',\n",
     "     'color': 'cornflowerblue',\n",
     "     'label': 'Offset Corrected Signal'\n",
     "     },\n",
     "     {'x': cCM,\n",
     "     'y': hCM,\n",
-    "     'y_err': np.sqrt(hCM[:]),\n",
-    "     'drawstyle': 'steps-mid',\n",
+    "     'drawstyle': 'steps-post',\n",
     "     'color': 'red',\n",
     "     'label': 'Common Mode Corrected Signal'\n",
     "     }]\n",
     "      \n",
-    "fig = xana.simplePlot(do, figsize='2col', aspect=1, x_label = 'Corrected Signal (ADU)', y_label=\"Counts\", \n",
-    "                      x_range = (-200, 200), legend='top-right-frame-1col', \n",
-    "                      title = 'Corrected Signal - 2nd Iteration') \n",
+    "fig = xana.simplePlot(do, figsize='2col', aspect=1, x_label = 'Corrected Pedestal (ADU)', y_label=\"Counts\", \n",
+    "                      x_range = (-50, 1050), y_log=True, legend='top-right-frame-1col', \n",
+    "                      title = 'Corrected Pedestal - 2nd Iteration') \n",
     "\n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Comparison of the First Round of Corrections - Bad Pixels Not Excluded\"\n",
     "t0.field_names = [\"After Offset Correction\", \"After Common Mode Correction\"]\n",
-    "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.mean(offset_corr_data)), \"Mean: {:0.3f} (ADU)\".format(np.mean(data))])\n",
+    "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.mean(offset_corr_data)), \"Mean: {:0.3f} (ADU)\"\n",
+    "            .format(np.mean(data))])\n",
     "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.median(offset_corr_data)), \"Median: {:0.3f} (ADU)\"\n",
     "            .format(np.median(data))])\n",
     "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.std(offset_corr_data)), \n",
@@ -588,7 +603,7 @@
    "source": [
     "### Noise Map after Common Mode Correction\n",
     "\n",
-    "In the following, the effect of common mode correction on the noise is shown. Finally common mode corrected noise map (noiseMapCM) is displayed and compared to the initial uncorrected noise map:"
+    "In the following, the effect of common mode correction on the noise is shown. Finally common mode corrected noise map (noiseMapCM) is displayed and compared to the initial uncorrected noise map."
    ]
   },
   {
@@ -603,18 +618,15 @@
     "\n",
     "dn = [{'x': cn[:-1],\n",
     "       'y': hn,\n",
-    "       # 'y_err': np.sqrt(hn[:]),\n",
-    "       'drawstyle': 'steps-mid',  # 'bars',\n",
-    "       'color': 'blue',  # 'cornflowerblue',\n",
-    "       'label': 'Uncorrected Noise (ADU)'\n",
+    "       'drawstyle': 'steps-post',\n",
+    "       'color': 'blue',\n",
+    "       'label': 'Uncorrected'\n",
     "       },\n",
     "      {'x': cn_CM[:-1],\n",
     "       'y': hn_CM,\n",
-    "       # 'y_err': np.sqrt(hn_CM[:]),\n",
-    "       'drawstyle': 'steps-mid',  # 'bars',\n",
+    "       'drawstyle': 'steps-post',  # 'bars',\n",
     "       'color': 'crimson',  # 'red',#'cornflowerblue',\n",
-    "       # 'ecolor': 'crimson',\n",
-    "       'label': 'Common Mode Corrected Noise (ADU)'\n",
+    "       'label': 'Common Mode Corrected'\n",
     "       }]\n",
     "fig = xana.simplePlot(dn, figsize='1col', aspect=1, x_label='Noise (ADU)', y_label=\"Counts\",\n",
     "                      x_range=(0, 600), y_log=True, legend='top-center-frame-1col',\n",
@@ -624,8 +636,9 @@
     "fig = xana.heatmapPlot(noiseMapCM[:, :, 0],\n",
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Common Mode Corrected Noise (ADU)',\n",
-    "                       x_range=(0, y),\n",
-    "                       y_range=(0, x), vmax=2*np.mean(noiseMapCM), title='Common Mode Corrected Noise Map')"
+    "                       x_range=(0, pixels_y),\n",
+    "                       y_range=(0, pixels_x), vmax=2*np.mean(noiseMapCM), panel_x_label='Row Stat (ADU)', \n",
+    "                       panel_y_label='Column Stat (ADU)', title='Common Mode Corrected Noise Map')"
    ]
   },
   {
@@ -635,7 +648,7 @@
    "outputs": [],
    "source": [
     "# Resetting the calculators:\n",
-    "noiseCalCM.reset() # resetting noise calculator\n",
+    "noiseCal.reset() # resetting noise calculator\n",
     "histCalCorrected.reset() # resetting histogram calculator\n",
     "histCalCMCorrected.reset() # resetting histogram calculator\n",
     "cmCorrection.reset()"
@@ -645,9 +658,9 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Final BadPixelMap\n",
+    "### Second BadPixelMap\n",
     "\n",
-    "This is generated based on the offset and common mode corrected noise maps:"
+    "This is generated based on the offset map and offset-and-common-mode-corrected noise map."
    ]
   },
   {
@@ -656,7 +669,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "bad_pixels = np.zeros(offsetMap.shape, np.uint32)\n",
     "mnoffset = np.nanmedian(offsetMap)\n",
     "stdoffset = np.nanstd(offsetMap)\n",
     "bad_pixels[(offsetMap < mnoffset-bad_pixel_offset_sigma*stdoffset) |\n",
@@ -670,15 +682,280 @@
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Bad Pixel Value (ADU)',\n",
-    "                       x_range=(0, y),\n",
-    "                       y_range=(0, x), vmin=0, vmax=32)"
+    "                       x_range=(0, pixels_y),\n",
+    "                       y_range=(0, pixels_x), vmin=0, vmax=32,\n",
+    "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)', \n",
+    "                       title = 'Second Bad Pixels Map')"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Calibration Constants"
+    "Here, we are masking the pixels in the hole (center of the pnCCD)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "hole_mask = np.zeros(bad_pixels.shape, np.uint32) \n",
+    "hole_mask[483:539,477:543,:] = BadPixels.NON_SENSITIVE.value\n",
+    "\n",
+    "# Assigning this masked area as bad pixels:\n",
+    "bad_pixels = np.bitwise_or(bad_pixels, hole_mask)\n",
+    "\n",
+    "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
+    "                       x_label='Columns', y_label='Rows',\n",
+    "                       lut_label='Bad Pixel Value (ADU)',\n",
+    "                       x_range=(0, pixels_y),\n",
+    "                       y_range=(0, pixels_x), vmin=0, vmax=32, panel_x_label='Row Stat (ADU)', \n",
+    "                       panel_y_label='Column Stat (ADU)', title = 'Second Bad Pixels Map with the Hole in Center')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Third Iteration\n",
+    "\n",
+    "During the third iteration, the last bad pixel map is applied to the data. Bad pixels are masked. Possible cosmic ray events are also masked. Offset and common mode corrections are applied once again to the data, which now have bad pixels excluded, to produce a newly corrected noise map."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "event_threshold = sigmaNoise*np.median(noiseMapCM) # for exclusion of possible cosmic ray events\n",
+    "noiseCal.setBadPixelMask(bad_pixels != 0) # setting bad pixels map for the noise calculator"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
+    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
+    "chunkSize_new = 0 # See below\n",
+    "\n",
+    "for data in reader.readChunks():\n",
+    "    data = data.astype(np.float32)\n",
+    "    dx = np.count_nonzero(data, axis=(0, 1))\n",
+    "    data = data[:,:,dx != 0]\n",
+    "    data_mask = np.repeat(bad_pixels, data.shape[2], axis=2) # Converting bad_pixels to the same shape as the data\n",
+    "    data[data_mask != 0] = np.nan # masking data for bad pixels and equating the values to np.nan\n",
+    "    \n",
+    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will \n",
+    "    # temporarily change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
+    "    if data.shape[2] < chunkSize:\n",
+    "        chunkSize_new = data.shape[2]\n",
+    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
+    "              .format(chunkSize_new))\n",
+    "        images = images + chunkSize_new\n",
+    "        counter2 += 1 \n",
+    "    else:\n",
+    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
+    "        counter1 += 1\n",
+    "    \n",
+    "    data_copy = offsetCorrection.correct(copy.copy(data))\n",
+    "    cellTable=np.zeros(data_copy.shape[2], np.int32)\n",
+    "    data_copy = cmCorrection.correct(data_copy.astype(np.float32), cellTable=cellTable)\n",
+    "    data[data_copy > event_threshold] = np.nan # discarding events caused by cosmic rays\n",
+    "    data = np.ma.MaskedArray(data, np.isnan(data), fill_value=0) # masking cosmics, default fill_value = 1e+20 \n",
+    "\n",
+    "    data -= offsetMap.data # Offset correction\n",
+    "    offset_corr_data2 = copy.copy(data) # I am copying this so that I can have access to it in the table below\n",
+    "    histCalCorrected.fill(data)\n",
+    "    cellTable=np.zeros(data.shape[2], np.int32) # Common mode correction\n",
+    "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable) \n",
+    "    histCalCMCorrected.fill(data)\n",
+    "    noiseCal.fill(data)  # Filling calculators with data\n",
+    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
+    "\n",
+    "print(\"Final iteration is performed.\")\n",
+    "print('A total number of {} images are processed.'.format(images))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "noiseMapCM_2nd = noiseCal.get().filled(0) # the masked pixels are filled with 0\n",
+    "ho2, eo2, co2, so2 = histCalCorrected.get()\n",
+    "hCM2, eCM2, cCM2, sCM2 = histCalCMCorrected.get()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Plots of the Final Results\n",
+    "\n",
+    "The following plot and table compare the offset and common mode corrected signal with and without the bad pixels."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "do_Final = [{'x': co,\n",
+    "     'y': ho,\n",
+    "     'drawstyle': 'steps-post',\n",
+    "     'color': 'blue',\n",
+    "     'label': 'Offset Correction, Bad Pixels Included - 2nd Trial'\n",
+    "     },\n",
+    "    {'x': cCM,\n",
+    "     'y': hCM,\n",
+    "     'drawstyle': 'steps-post',\n",
+    "     'color': 'red',\n",
+    "     'ecolor': 'crimson',\n",
+    "     'label': 'Common Mode Correction, Bad Pixels Included - 2nd Trial' \n",
+    "     },\n",
+    "    {'x': co2,\n",
+    "     'y': ho2,\n",
+    "     'drawstyle': 'steps-post',\n",
+    "     'color': 'black',\n",
+    "     'label': 'Offset Correction, Bad Pixels Excluded - 3rd Trial'\n",
+    "     },\n",
+    "    {'x': cCM2,\n",
+    "     'y': hCM2,\n",
+    "     'drawstyle': 'steps-post',\n",
+    "     'color': 'orange',\n",
+    "     'label': 'Common Mode Correction, Bad Pixels Excluded - 3rd Trial'\n",
+    "     }]\n",
+    "\n",
+    "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'Corrected Signal (ADU)', \n",
+    "                      y_label=\"Counts (Logarithmic Scale)\", y_log=True, x_range = (0, 1100),\n",
+    "                      legend='top-right-frame-1col', title = 'Comparison of Corrections')\n",
+    "\n",
+    "t0 = PrettyTable()\n",
+    "t0.title = \"Comparison of the Second Round of Corrections - Bad Pixels Excluded\"\n",
+    "t0.field_names = [\"After Offset Correction\", \"After Common Mode Correction\"]\n",
+    "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.nanmean(offset_corr_data2)), \"Mean: {:0.3f} (ADU)\"\n",
+    "            .format(np.nanmean(data))])\n",
+    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} (ADU)\"\n",
+    "            .format(np.nanmedian(data))])\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(offset_corr_data2)), \n",
+    "            \"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(data))])\n",
+    "print(t0,'\\n')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Final NoiseMap\n",
+    "\n",
+    "Finally, the offset and common mode corrected noise map with bad pixels and possible cosmic events excluded (noiseMapCM_2nd) is displayed."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#*****NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA*******#\n",
+    "hn_CM2, cn_CM2 = np.histogram(noiseMapCM_2nd.flatten(), bins=2000, range=(0, 600))\n",
+    "\n",
+    "dn2 = [{'x': cn[:-1],\n",
+    "     'y': hn,\n",
+    "     'drawstyle': 'steps-post',\n",
+    "     'color': 'blue',\n",
+    "     'label': 'Uncorrected'\n",
+    "     },\n",
+    "    {'x': cn_CM[:-1],\n",
+    "     'y': hn_CM,\n",
+    "     'drawstyle': 'steps-post',\n",
+    "     'color': 'red',\n",
+    "     'label': 'Common Mode Corrected prior to Bad Pixels Exclusion'\n",
+    "     },\n",
+    "    {'x': cn_CM2[:-1],\n",
+    "     'y': hn_CM2,\n",
+    "     'drawstyle': 'steps-post',\n",
+    "     'color': 'black',\n",
+    "     'label': 'Common Mode Corrected after Bad Pixels Exclusion'\n",
+    "     }]\n",
+    "\n",
+    "fig = xana.simplePlot(dn2, figsize='1col', aspect = 1, x_label = 'Noise (ADU)', y_label=\"Counts\", y_log=True, \n",
+    "                      legend='top-right-frame-1col', \n",
+    "                      title = 'Final Noise Comparison')\n",
+    "\n",
+    "fig = xana.heatmapPlot(noiseMapCM_2nd[:,:,0], aspect=1, x_label='Column Number', y_label='Row Number',\n",
+    "                       lut_label='Noise (ADU)', x_range=(0, pixels_y), y_range=(0, pixels_x),\n",
+    "                       title = 'Final Common Mode Corrected Noise\\n (Bad Pixels Excluded)', \n",
+    "                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Final Bad Pixel Map\n",
+    "\n",
+    "Lastly, the final bad pixel map is generated based on the OffsetMap and the noiseMapCM_2nd (offset and common mode corrected noise after exclusion of the bad pixels)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "mnoffset = np.nanmedian(offsetMap)\n",
+    "stdoffset = np.nanstd(offsetMap)\n",
+    "bad_pixels[(offsetMap < mnoffset-bad_pixel_offset_sigma*stdoffset) |\n",
+    "           (offsetMap > mnoffset+bad_pixel_offset_sigma*stdoffset)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
+    "\n",
+    "mnnoise = np.nanmedian(noiseMapCM_2nd)\n",
+    "stdnoise = np.nanstd(noiseMapCM_2nd)\n",
+    "bad_pixels[(noiseMapCM_2nd < mnnoise-bad_pixel_noise_sigma*stdnoise) | \n",
+    "           (noiseMapCM_2nd > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
+    "\n",
+    "bad_pixels = np.bitwise_or(bad_pixels, hole_mask)\n",
+    "\n",
+    "fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]),\n",
+    "                       x_label='Columns', y_label='Rows',\n",
+    "                       lut_label='Bad Pixel Value (ADU)',\n",
+    "                       x_range=(0, pixels_y),\n",
+    "                       y_range=(0, pixels_x), vmin=0, vmax=32, panel_x_label='Row Stat (ADU)', \n",
+    "                       panel_y_label='Column Stat (ADU)', title = 'Final Bad Pixels Map')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "display(Markdown('### Statistics on the Bad Pixels'))\n",
+    "num_bad_pixels = np.count_nonzero(bad_pixels)\n",
+    "num_all_pixels = pixels_x*pixels_y\n",
+    "percentage_bad_pixels = num_bad_pixels*100/num_all_pixels\n",
+    "print(\"Number of bad pixels: {:0.0f}, i.e. {:0.2f}% of all pixels\".format(num_bad_pixels, percentage_bad_pixels))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Calibration Constants\n",
+    "\n",
+    "Here, we send the dark constants to the database and/or save them locally."
    ]
   },
   {
@@ -694,7 +971,7 @@
    "source": [
     "constant_maps = {\n",
     "    'Offset': offsetMap,\n",
-    "    'Noise': noiseMapCM,\n",
+    "    'Noise': noiseMapCM_2nd,\n",
     "    'BadPixelsDark': bad_pixels\n",
     "}\n",
     "\n",
@@ -711,8 +988,8 @@
     "                                    integration_time=integration_time,\n",
     "                                    gain_setting=gain,\n",
     "                                    temperature=temperature_k,\n",
-    "                                    pixels_x=1024,\n",
-    "                                    pixels_y=1024)\n",
+    "                                    pixels_x=pixels_x,\n",
+    "                                    pixels_y=pixels_y)\n",
     "\n",
     "    device = Detectors.PnCCD1\n",
     "    metadata.detector_condition = condition\n",
@@ -728,15 +1005,19 @@
     "\n",
     "    if db_output:\n",
     "        try:\n",
-    "            metadata.send(cal_db_interface, timeout=cal_db_timeout)\n",
-    "            print(\"Inject {} constants from {}\".format(const_name,\n",
-    "                                                       metadata.calibration_constant_version.begin_at))\n",
+    "            metadata.send(cal_db_interface)\n",
+    "            print(f\"Inject {const_name} constants from {metadata.calibration_constant_version.begin_at}\")\n",
     "        except Exception as e:\n",
     "            print(e)\n",
     "\n",
     "    if local_output:\n",
     "        save_const_to_h5(metadata, out_folder)\n",
-    "        print(\"Calibration constant {} is stored to {}.\".format(const, out_folder))"
+    "        print(f\"Calibration constant {const.name} is stored to {out_folder}.\\n\")\n",
+    "\n",
+    "print(\"Generated constants with conditions:\\n\")\n",
+    "print(f\"• bias_voltage: {bias_voltage}\\n• integration_time: {integration_time}\\n\"\n",
+    "      f\"• gain_setting: {gain}\\n• temperature: {temperature_k}\\n\"\n",
+    "      f\"• creation_time: {creation_time}\\n\")"
    ]
   }
  ],