diff --git a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
index 5a3f2c96507f75b0de4380320dbb3c98c01b7ae7..0b56642e1a91ff2f68c87245995eac562c4677e6 100644
--- a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
+++ b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC_New_Common_Mode.ipynb
@@ -16,12 +16,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:54:38.999974Z",
-     "start_time": "2018-12-06T10:54:38.983406Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Initial Parameters:\n",
@@ -46,13 +41,14 @@
     "bad_pixel_offset_sigma = 5. # Any pixel whose offset is beyond 5 standard deviations, is a bad pixel\n",
     "bad_pixel_noise_sigma = 5. # Any pixel whose noise is beyond 5 standard deviations, is a bad pixel\n",
     "sigmaNoise = 5. # Any pixel whose signal exceeds 'sigmaNoise'*noiseCM (common mode corrected noise) will be masked\n",
-    "fix_temperature = 0. # Fixed operation temperature in Kelvins. If set to 0, mean value of the data file's temperature is used.\n",
+    "fix_temperature = 0. # Fixed operation temperature in Kelvins. If set to 0, mean value of the data file's temperature is \n",
+    "                     # used.\n",
     "chunkSize = 100 # Number of images to read per chunk\n",
     "cpuCores = 40 # Specifies the number of running cpu cores\n",
     "commonModeAxis = 1 # Axis along which common mode will be calculated (0: along rows, 1: along columns)\n",
-    "ADU_to_electron_upper = 6.1 # According to Table 6.1 of Ivana Klačková's master's thesis, for upper hemisphere: conversion\n",
-    "                            # gain is 1 ADU = 6.1e-\n",
-    "ADU_to_electron_lower = 6.2 # and for lower hemisphere: conversion gain is 1 ADU = 6.2e-\n",
+    "ADU_to_electron_upper_hg = 6.1 # According to Table 6.1 of Ivana Klačková's master's thesis, for upper hemisphere and \n",
+    "                               # high gain: conversion gain is 1 ADU = 6.1e-\n",
+    "ADU_to_electron_lower_hg = 6.2 # and for lower hemisphere and high gain: conversion gain is 1 ADU = 6.2e- \n",
     "run_parallel = True # For parallel computation \n",
     "db_output = True # Output constants to the calibration database"
    ]
@@ -112,8 +108,20 @@
    "source": [
     "# Output Folder Creation:\n",
     "if not os.path.exists(out_folder):\n",
-    "    os.makedirs(out_folder)\n",
-    "\n",
+    "    os.makedirs(out_folder)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-12-06T10:54:39.467334Z",
+     "start_time": "2018-12-06T10:54:39.427784Z"
+    }
+   },
+   "outputs": [],
+   "source": [
     "# Number of Images:\n",
     "def nImagesOrLimit(nImages, limit):\n",
     "    if limit == 0:\n",
@@ -193,13 +201,13 @@
     "nImages = fastccdreaderh5.getDataSize(filename, h5path)[0] # Specifies total number of images to proceed\n",
     "nImages = nImagesOrLimit(nImages, number_dark_frames)\n",
     "profile = False\n",
-    "gain_setting = None\n",
     "\n",
     "with h5py.File(filename, 'r') as f:\n",
     "    bias_voltage = int(f['{}/biasclock/bias/value'.format(h5path_cntrl)][0])\n",
     "    det_gain = int(f['{}/exposure/gain/value'.format(h5path_cntrl)][0])\n",
     "    integration_time = int(f['{}/exposure/exposure_time/value'.format(h5path_cntrl)][0])\n",
-    "    temperature = np.mean(f[h5path_t])"
+    "    temperature = np.mean(f[h5path_t])\n",
+    "    temperature = round(temperature, 2)"
    ]
   },
   {
@@ -216,20 +224,23 @@
     "# Printing the Parameters Read from the Data File:\n",
     "\n",
     "display(Markdown('### Evaluated Parameters'))\n",
-    "print(\"Number of dark images to analyze:\",nImages)   \n",
-    "\n",
-    "if det_gain == 8:\n",
-    "    gain_setting = \"high\"\n",
-    "elif det_gain == 2:\n",
-    "    gain_setting = \"medium\"\n",
-    "elif det_gain == 1:\n",
-    "    gain_setting = \"low\"\n",
-    "else:\n",
-    "    gain_setting = \"auto\"\n",
-    "\n",
-    "print(\"Bias voltage is {} V\".format(bias_voltage))\n",
-    "print(\"Detector gain is set to x{}\".format(det_gain), \"({} gain)\".format(gain_setting))\n",
-    "print(\"Detector integration time is set to {}\".format(integration_time), 'ms')\n",
+    "print(\"Number of dark images to analyze:\", nImages)   \n",
+    "\n",
+    "gain_dict = {\n",
+    "        \"high gain\" : 8,\n",
+    "        \"medium gain\" : 2,\n",
+    "        \"low gain\" : 1,\n",
+    "        \"auto gain\" : 0\n",
+    "    }\n",
+    "\n",
+    "for gain, value in gain_dict.items():   \n",
+    "    if det_gain == value:\n",
+    "        gain_setting = gain\n",
+    "                    \n",
+    "print(\"Bias voltage is {} V.\".format(bias_voltage))\n",
+    "print(\"Detector gain is set to x{} ({}).\".format(det_gain, gain_setting))\n",
+    "print(\"Detector integration time is set to {}\".format(integration_time), 'ms.') \n",
+    " \n",
     "\n",
     "if fix_temperature != 0.:\n",
     "    print(\"Using a fixed temperature of {} K\".format(fix_temperature))\n",
@@ -238,9 +249,7 @@
     "    # calibration constant to the DB later\n",
     "    fix_temperature = temperature + 273.15\n",
     "    print(\"Temperature is not fixed.\")\n",
-    "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K\".format(temperature, fix_temperature))\n",
-    "\n",
-    "print(\"Output: {}\".format(out_folder))"
+    "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K\".format(temperature, fix_temperature))"
    ]
   },
   {
@@ -300,12 +309,43 @@
    },
    "outputs": [],
    "source": [
+    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
+    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
+    "chunkSize_new = 0 # See below\n",
+    "\n",
     "for data in reader.readChunks():\n",
     "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
     "    dx = np.count_nonzero(data, axis=(0, 1))\n",
     "    data = data[:,:,dx != 0]\n",
+    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will temporarily \n",
+    "    # change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
+    "    if data.shape[2] < chunkSize:\n",
+    "        chunkSize_new = data.shape[2]\n",
+    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
+    "              .format(chunkSize_new))\n",
+    "        images = images + chunkSize_new\n",
+    "        counter2 += 1 \n",
+    "    else:\n",
+    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
+    "        counter1 += 1\n",
+    "             \n",
     "    noiseCal.fill(data) # Filling calculators with data\n",
-    "          \n",
+    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
+    "\n",
+    "print('A total number of {} images are processed.'.format(images))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2018-12-06T10:55:21.238009Z",
+     "start_time": "2018-12-06T10:54:54.586435Z"
+    }
+   },
+   "outputs": [],
+   "source": [
     "offsetMap = noiseCal.getOffset() # Producing offset map\n",
     "noiseMap = noiseCal.get() # Producing noise map\n",
     "noiseCal.reset() # Resetting noise calculator\n",
@@ -328,7 +368,7 @@
    "outputs": [],
    "source": [
     "#************** OFFSET MAP HISTOGRAM ***********#\n",
-    "ho,co = np.histogram(offsetMap.flatten(), bins=700) # ho = offset histogram; co = offset bin centers\n",
+    "ho, co = np.histogram(offsetMap.flatten(), bins=700) # ho = offset histogram; co = offset bin centers\n",
     "do = {'x': co[:-1],\n",
     "     'y': ho,\n",
     "     'y_err': np.sqrt(ho[:]),\n",
@@ -337,18 +377,19 @@
     "     'label': 'Raw Signal (ADU)'\n",
     "     }\n",
     "fig = xana.simplePlot(do, figsize='1col', aspect=1, x_label = 'Raw Signal (ADU)', y_label=\"Counts\", \n",
-    "                      x_range = (3400,4000), title = 'Offset Histogram')\n",
+    "                      x_range = (3400, 4400), title = 'Offset Histogram')\n",
     "#fig.savefig('Offset_Hist.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Raw Signal\"\n",
-    "t0.field_names = [\"Mean\",\"Median\", \"Standard Deviation\"]\n",
-    "t0.add_row([\"{:0.3f} (ADU)\".format(np.mean(data)), \"{:0.3f} (ADU)\".format(np.median(data)), \"{:0.3f} (ADU)\".format(np.std(data))])\n",
+    "t0.field_names = [\"Mean\", \"Median\", \"Standard Deviation\"]\n",
+    "t0.add_row([\"{:0.3f} (ADU)\".format(np.mean(data)), \"{:0.3f} (ADU)\".format(np.median(data)), \"{:0.3f} (ADU)\"\n",
+    "            .format(np.std(data))])\n",
     "print(t0,'\\n')\n",
     "\n",
     "#************** OffsetMAP *******************#\n",
     "fig = xana.heatmapPlot(offsetMap[:,:,0], x_label='Column Number', y_label='Row Number',  aspect=1,\n",
-    "                       x_range=(0,y), y_range=(0,x), vmin=3000, vmax=4300, lut_label='Offset (ADU)', \n",
+    "                       x_range=(0, y), y_range=(0, x), vmin=3000, vmax=4300, lut_label='Offset (ADU)', \n",
     "                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)', \n",
     "                       panel_top_low_lim = 3000, panel_top_high_lim = 4500, panel_side_low_lim = 3000, \n",
     "                       panel_side_high_lim = 5000, title = 'OffsetMap')\n",
@@ -356,8 +397,8 @@
     "\n",
     "#************** Raw NoiseMAP *******************#\n",
     "fig = xana.heatmapPlot(noiseMap[:,:,0], x_label='Column Number', y_label='Row Number', aspect=1,\n",
-    "                       lut_label='Uncorrected Noise (ADU)', x_range=(0,y),\n",
-    "                       y_range=(0,x), vmax=2*np.mean(noiseMap), panel_x_label='Columns Stat (ADU)', \n",
+    "                       lut_label='Uncorrected Noise (ADU)', x_range=(0, y),\n",
+    "                       y_range=(0, x), vmax=2*np.mean(noiseMap), panel_x_label='Columns Stat (ADU)', \n",
     "                       panel_y_label='Rows Stat (ADU)', panel_top_low_lim = 0, panel_top_high_lim = 20, \n",
     "                       panel_side_low_lim = 0, panel_side_high_lim = 50, title = 'Uncorrected NoiseMap')\n",
     "#fig.savefig('RawNoiseMap.pdf', format='pdf', dpi=400, bbox_inches='tight')"
@@ -379,7 +420,7 @@
     "# Common Mode Correction:\n",
     "# This is the new method subtracting the median of all pixels that are read out at the same time along a row:\n",
     "cmCorrection = xcal.CommonModeCorrection([data.shape[0], data.shape[1]], [data.shape[0]//2, data.shape[1]], \n",
-    "                                         commonModeAxis, parallel=False, dType=np.float32, stride=10,\n",
+    "                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=10,\n",
     "                                         noiseMap=noiseMap.astype(np.float32), minFrac=0)\n",
     "\n",
     "cmCorrection.debug()"
@@ -394,10 +435,10 @@
     "# Histogram Calculators:\n",
     "\n",
     "# For offset corrected data:\n",
-    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=600, range=[-200, 200], memoryCells=memoryCells, \n",
+    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=400, range=[-200, 200], memoryCells=memoryCells, \n",
     "                                            cores=cpuCores, gains=None, blockSize=blockSize)\n",
     "# For common mode corrected data:\n",
-    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=600, range=[-200, 200], memoryCells=memoryCells, \n",
+    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=400, range=[-200, 200], memoryCells=memoryCells, \n",
     "                                              cores=cpuCores, gains=None, blockSize=blockSize)"
    ]
   },
@@ -416,11 +457,27 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
+    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
+    "chunkSize_new = 0 # See below\n",
+    "\n",
     "for data in reader.readChunks():\n",
     "    \n",
-    "    data = data.astype(np.float32)\n",
+    "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
     "    dx = np.count_nonzero(data, axis=(0, 1))\n",
     "    data = data[:,:,dx != 0] \n",
+    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will temporarily \n",
+    "    # change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
+    "    if data.shape[2] < chunkSize:\n",
+    "        chunkSize_new = data.shape[2]\n",
+    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
+    "              .format(chunkSize_new))\n",
+    "        images = images + chunkSize_new\n",
+    "        counter2 += 1 \n",
+    "    else:\n",
+    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
+    "        counter1 += 1\n",
+    "    \n",
     "    data = offsetCorrection.correct(data) # Offset correction\n",
     "    offset_corr_data = copy.copy(data) # I am copying this so that I can have access to it in the table below \n",
     "    histCalCorrected.fill(data)\n",
@@ -428,10 +485,9 @@
     "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable) # Common mode correction\n",
     "    histCalCMCorrected.fill(data)\n",
     "    noiseCal.fill(data)  # Filling noise calculator with common mode (CM) corrected data\n",
-    "    \n",
-    "noiseMapCM = noiseCal.get() # Produces CM corrected noise map\n",
-    "ho, eo, co , so = histCalCorrected.get()\n",
-    "hCM, eCM, cCM ,sCM = histCalCMCorrected.get()\n",
+    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
+    "\n",
+    "print('A total number of {} images are processed.'.format(images))\n",
     "print(\"Offset and common mode corrections are applied.\")"
    ]
   },
@@ -441,7 +497,18 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# I am copying these so that I can replot them later after the calculators are reset:\n",
+    "noiseMapCM = noiseCal.get() # Produces CM corrected noise map\n",
+    "ho, eo, co, so = histCalCorrected.get()\n",
+    "hCM, eCM, cCM, sCM = histCalCMCorrected.get()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# We are copying these so that we can replot them later after the calculators are reset:\n",
     "\n",
     "ho_second_trial = copy.copy(ho)\n",
     "co_second_trial = copy.copy(co)\n",
@@ -480,15 +547,17 @@
     "     }]\n",
     "      \n",
     "fig = xana.simplePlot(do, figsize='2col', aspect=1, x_label = 'Corrected Signal (ADU)', y_label=\"Counts\", \n",
-    "                      x_range = (-20,20), legend='top-right-frame-1col', title = 'Corrected Signal - 2nd Iteration')\n",
+    "                      x_range = (-20, 20), legend='top-right-frame-1col', title = 'Corrected Signal - 2nd Iteration')\n",
     "#fig.savefig('Corrected_Signal_Hist_1.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "t0 = PrettyTable()\n",
-    "t0.title = \"Comparison of the First Round of Corrections - Bad Pixels Included\"\n",
-    "t0.field_names = [\"After Offset Correction\",\"After Common Mode Correction\"]\n",
+    "t0.title = \"Comparison of the First Round of Corrections - Bad Pixels Not Excluded\"\n",
+    "t0.field_names = [\"After Offset Correction\", \"After Common Mode Correction\"]\n",
     "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.mean(offset_corr_data)), \"Mean: {:0.3f} (ADU)\".format(np.mean(data))])\n",
-    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.median(offset_corr_data)), \"Median: {:0.3f} (ADU)\".format(np.median(data))])\n",
-    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.std(offset_corr_data)), \"Standard Deviation: {:0.3f} (ADU)\".format(np.std(data))])\n",
+    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.median(offset_corr_data)), \"Median: {:0.3f} (ADU)\"\n",
+    "            .format(np.median(data))])\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.std(offset_corr_data)), \"Standard Deviation: {:0.3f} (ADU)\"\n",
+    "            .format(np.std(data))])\n",
     "print(t0,'\\n')"
    ]
   },
@@ -510,9 +579,9 @@
    "outputs": [],
    "source": [
     "#*****NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA*******#\n",
-    "hn,cn = np.histogram(noiseMap.flatten(), bins=200, range=(2,40)) # hn: histogram of noise, cn: bin centers for noise\n",
-    "hn_CM,cn_CM = np.histogram(noiseMapCM.flatten(), bins=200, range=(2,40))\n",
-    "\n",
+    "hn, cn = np.histogram(noiseMap.flatten(), bins=200, range=(0, 40)) # hn: histogram of noise, cn: bin centers for noise\n",
+    "hn_CM, cn_CM = np.histogram(noiseMapCM.flatten(), bins=200, range=(0, 40))\n",
+    " \n",
     "dn = [{'x': cn[:-1],\n",
     "     'y': hn,\n",
     "     #'y_err': np.sqrt(hn[:]),\n",
@@ -529,15 +598,15 @@
     "     'label': 'Common Mode Corrected Noise'\n",
     "     }]\n",
     "fig = xana.simplePlot(dn, figsize='2col', aspect=1, x_label = 'Noise (ADU)', y_label=\"Counts\", \n",
-    "                      x_range=(0,40), y_range=(0,1e6), y_log=True, legend='top-center-frame-1col',\n",
+    "                      x_range=(0, 40), y_range=(0, 1e6), y_log=True, legend='top-center-frame-1col',\n",
     "                      title = 'Noise Comparison')\n",
     "\n",
     "#fig.savefig('Noise_CM_1_Hist.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "fig = xana.heatmapPlot(noiseMapCM[:,:,0], aspect=1, x_label='Column Number', y_label='Row Number',\n",
     "                       lut_label='Common Mode Corrected Noise (ADU)', x_range=(0,y), y_range=(0,x), \n",
-    "                       vmax=2*np.mean(noiseMapCM), panel_top_low_lim = 0, panel_top_high_lim = 20, panel_side_low_lim = 0,\n",
-    "                       panel_side_high_lim = 50, title = 'Common Mode Corrected Noise', \n",
+    "                       vmax=2*np.mean(noiseMapCM), panel_top_low_lim = 0, panel_top_high_lim = 20, \n",
+    "                       panel_side_low_lim = 0, panel_side_high_lim = 50, title = 'Common Mode Corrected Noise', \n",
     "                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)')\n",
     "\n",
     "#fig.savefig('NoiseMapCM.pdf', format='pdf', dpi=400, bbox_inches='tight')"
@@ -583,9 +652,9 @@
     "           (noiseMapCM > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
     "\n",
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]),aspect=1, x_label='Column Number', y_label='Row Number', \n",
-    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0,y), y_range=(0,x), \n",
-    "                       title = 'Bad Pixels Map Excluding Non-Sensitive Areas', panel_x_label= 'Columns Stat', \n",
-    "                       panel_y_label='Rows Stat')"
+    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x), \n",
+    "                       title = 'Bad Pixels Map Excluding Non-Sensitive\\n Areas in Middle of CCD', \n",
+    "                       panel_x_label= 'Columns Stat', panel_y_label='Rows Stat')"
    ]
   },
   {
@@ -628,8 +697,8 @@
     "\n",
     "\n",
     "# Defining a circular mask + a rectangular mask (overscan) for the hole in the middle of the CCD:\n",
-    "h, w = (x,y)\n",
-    "hole_mask_bool = create_circular_mask(h-4, w, radius=61.5, center=(w//2,(h-4)//2))\n",
+    "h, w = (x, y)\n",
+    "hole_mask_bool = create_circular_mask(h-4, w, radius=61.5, center=(w//2, (h-4)//2))\n",
     "hole_mask = np.zeros(hole_mask_bool.shape, np.uint32)\n",
     "hole_mask[hole_mask_bool] = BadPixels.NON_SENSITIVE.value\n",
     "\n",
@@ -639,11 +708,12 @@
     "\n",
     "# Assigning this masked area as bad pixels:\n",
     "bad_pixels = np.bitwise_or(bad_pixels, mask)\n",
+    "\n",
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]),aspect=1, x_label='Column Number', y_label='Row Number', \n",
-    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0,y), y_range=(0,x), panel_top_low_lim = 0, \n",
-    "                       panel_top_high_lim = 20, panel_side_low_lim = 0, panel_side_high_lim = 20, \n",
-    "                       title = 'Bad Pixels Map Including Non-Sensitive Areas', panel_x_label='Columns Stat', \n",
-    "                       panel_y_label='Rows Stat', vmax=20)\n",
+    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x), \n",
+    "                       panel_top_low_lim = 0, panel_top_high_lim = 20, panel_side_low_lim = 0, panel_side_high_lim = 20, \n",
+    "                       title = 'Bad Pixels Map Including Non-Sensitive\\n Areas in Middle of CCD', \n",
+    "                       panel_x_label='Columns Stat', panel_y_label='Rows Stat', vmax=20)\n",
     "\n",
     "#fig.savefig('BadPixelMap_1.svg', format='svg', dpi=1200, bbox_inches='tight') "
    ]
@@ -678,10 +748,26 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "counter1 = 1 # To count how many \"if data.shape[2] >= chunkSize\" instances are there.\n",
+    "counter2 = 0 # To count how many \"if data.shape[2] < chunkSize\" instances are there.\n",
+    "chunkSize_new = 0 # See below\n",
+    "\n",
     "for data in reader.readChunks():\n",
-    "    data = data.astype(np.float32)\n",
+    "    #data = data.astype(np.float32)\n",
+    "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
     "    dx = np.count_nonzero(data, axis=(0, 1))\n",
     "    data = data[:,:,dx != 0]\n",
+    "    # Some sequences may have less than 500 frames in them. To find out how many images there are, we will temporarily \n",
+    "    # change chunkSize to be the same as whatever number of frames the last chunk of data has:\n",
+    "    if data.shape[2] < chunkSize:\n",
+    "        chunkSize_new = data.shape[2]\n",
+    "        print(\"Number of images are less than chunkSize. chunkSize is temporarily changed to {}.\"\n",
+    "              .format(chunkSize_new))\n",
+    "        images = images + chunkSize_new\n",
+    "        counter2 += 1 \n",
+    "    else:\n",
+    "        images = counter1*chunkSize + counter2*chunkSize_new\n",
+    "        counter1 += 1     \n",
     "    data_copy = offsetCorrection.correct(copy.copy(data))\n",
     "    cellTable=np.zeros(data_copy.shape[2], np.int32)\n",
     "    data_copy = cmCorrection.correct(data_copy.astype(np.float32), cellTable=cellTable)\n",
@@ -694,11 +780,21 @@
     "    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable)\n",
     "    histCalCMCorrected.fill(data)\n",
     "    noiseCal.fill(data) \n",
+    "    chunkSize = 100 # resetting the chunkSize to its default value for the next sequence or data-chunk\n",
     "\n",
+    "print('A total number of {} images are processed.'.format(images))\n",
+    "print(\"Final iteration is Performed.\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "noiseMapCM_2nd = noiseCal.get().filled(0) # the masked pixels are filled with zero\n",
     "ho2, eo2, co2, so2 = histCalCorrected.get()\n",
-    "hCM2, eCM2, cCM2 ,sCM2 = histCalCMCorrected.get()\n",
-    "print(\"Final iteration is Performed.\")"
+    "hCM2, eCM2, cCM2, sCM2 = histCalCMCorrected.get()"
    ]
   },
   {
@@ -751,17 +847,19 @@
     "     }]\n",
     "\n",
     "fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label = 'Corrected Signal (ADU)', \n",
-    "                      y_label=\"Counts (Logarithmic Scale)\", y_log=True, x_range=(-40,40), legend='bottom-left-frame-1col',\n",
-    "                      title = 'Comparison of Corrected Signal')\n",
+    "                      y_label=\"Counts (Logarithmic Scale)\", y_log=True, x_range=(-40, 40), \n",
+    "                      legend='bottom-left-frame-1col', title = 'Comparison of Corrected Signal')\n",
     "#fig.savefig('Corrected_Signal_Hist_2.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "# offset_corr_data2 and data most likely have some nan's => I am going to use nanmean, nanmedian and nanstd functions:\n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Comparison of the Second Round of Corrections - Bad Pixels Excluded\"\n",
-    "t0.field_names = [\"After Offset Correction\",\"After Common Mode Correction\"]\n",
+    "t0.field_names = [\"After Offset Correction\", \"After Common Mode Correction\"]\n",
     "t0.add_row([\"Mean: {:0.3f} (ADU)\".format(np.nanmean(offset_corr_data2)), \"Mean: {:0.3f} (ADU)\".format(np.nanmean(data))])\n",
-    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} (ADU)\".format(np.nanmedian(data))])\n",
-    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(offset_corr_data2)), \"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(data))])\n",
+    "t0.add_row([\"Median: {:0.3f} (ADU)\".format(np.nanmedian(offset_corr_data2)), \"Median: {:0.3f} (ADU)\"\n",
+    "            .format(np.nanmedian(data))])\n",
+    "t0.add_row([\"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(offset_corr_data2)), \n",
+    "            \"Standard Deviation: {:0.3f} (ADU)\".format(np.nanstd(data))])\n",
     "print(t0,'\\n')"
    ]
   },
@@ -783,7 +881,7 @@
    "outputs": [],
    "source": [
     "#*****NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA*******#\n",
-    "hn_CM2,cn_CM2 = np.histogram(noiseMapCM_2nd.flatten(), bins=200, range=(2,40))\n",
+    "hn_CM2, cn_CM2 = np.histogram(noiseMapCM_2nd.flatten(), bins=200, range=(0, 40))\n",
     "\n",
     "dn2 = [{'x': cn[:-1],\n",
     "     'y': hn,\n",
@@ -809,13 +907,13 @@
     "     }]\n",
     "\n",
     "fig = xana.simplePlot(dn2, figsize='2col', aspect = 1, x_label = 'Noise (ADU)', y_label=\"Counts\", y_log=True, \n",
-    "                      x_range=(0,40), y_range=(0,1e6), legend='top-right-frame-1col', title = 'Final Noise Comparison')\n",
+    "                      x_range=(0, 40), y_range=(0, 1e6), legend='top-right-frame-1col', title = 'Final Noise Comparison')\n",
     "\n",
     "#fig.savefig('Noise_Hist_2.svg', format='svg', dpi=1200, bbox_inches='tight') \n",
     "\n",
     "fig = xana.heatmapPlot(np.log2(noiseMapCM_2nd[:,:,0]), aspect=1, x_label='Column Number', y_label='Row Number',\n",
-    "                       lut_label='Noise (ADU)', x_range=(0,y), y_range=(0,x), vmax=2*np.mean(noiseMapCM_2nd), \n",
-    "                       title = 'Final Common Mode Corrected Noise (Bad Pixels Excluded)', \n",
+    "                       lut_label='Noise (ADU)', x_range=(0, y), y_range=(0, x), vmax=2*np.mean(noiseMapCM_2nd), \n",
+    "                       title = 'Final Common Mode Corrected Noise\\n (Bad Pixels Excluded)', \n",
     "                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)')\n",
     "#fig.savefig('NoiseMapCM_2nd.pdf', format='pdf', dpi=400, bbox_inches='tight') "
    ]
@@ -848,8 +946,8 @@
     "\n",
     "bad_pixels = np.bitwise_or(bad_pixels, mask)\n",
     "fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]),aspect=1, x_label='Column Number', y_label='Row Number', \n",
-    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0,y), y_range=(0,x), panel_top_low_lim = 0, \n",
-    "                       panel_top_high_lim = 20, panel_side_low_lim = 0, panel_side_high_lim = 20, \n",
+    "                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x), \n",
+    "                       panel_top_low_lim = 0, panel_top_high_lim = 20, panel_side_low_lim = 0, panel_side_high_lim = 20, \n",
     "                       title = 'Final Bad Pixels Map', panel_x_label='Columns Stat', \n",
     "                       panel_y_label='Rows Stat', vmax=20)\n",
     "#fig.savefig('BadPixelMap_2.svg', format='svg', dpi=1200, bbox_inches='tight') "
@@ -874,7 +972,7 @@
    "source": [
     "### Electronic Noise\n",
     "\n",
-    "According to Table 6.1 (page 80) of Ivana Klačková's master's thesis: \"Conversion gain for the FastCCD is: lower hemisphere = 6.2e-/ADU and upper hemisphere = 6.1e-/ADU.\"\n",
+    "According to Table 6.1 (page 80) of Ivana Klačková's master's thesis: \"Conversion gain for the FastCCD high gain is: lower hemisphere = 6.2e-/ADU and upper hemisphere = 6.1e-/ADU.\" Also, we know that the high gain/medium gain and high gain/low gain ratios are 4 and 8, respectively since high gain = x8, medium gain = x2 and low gain = x1. We do not currently (October - 2019) know the conversion gains for the FastCCD medium and lows gains in electrons. Therefore, we will use those of the high gains (in both hemispheres) together with the gain ratios to convert the noise in ADU to electrons.\n",
     "\n",
     "The following Tables present the noise along lower hemisphere, upper hemisphere, and the entire FastCCD detector at different stages. Here, the values in the first table (in ADU and e-) are the mean of noise per pixel, where noise is considered to be the initial uncorrected noise, CM corrected noise after second trial (including bad pixels) and CM corrected noise after third trial (excluding bad pixels). \n",
     "\n",
@@ -890,27 +988,80 @@
     "# noiseMap refers to the initial uncorrected noise, noiseMapCM refers to common mode corrected noise with inclusion of \n",
     "# bad pixels, and noiseMapCM_2nd refers to common mode corrected noise without inclusion of bad pixels:\n",
     "\n",
-    "ADU_to_electron = (ADU_to_electron_upper + ADU_to_electron_lower)/2 # Average of ADU_to_electron for the entire detector \n",
+    "ADU_to_electron_hg = (ADU_to_electron_upper_hg + ADU_to_electron_lower_hg)/2 # Average of ADU_to_electron for entire CCD\n",
+    "                                                                             # for high gain\n",
+    "\n",
+    "ADU_to_electron_upper_mg = ADU_to_electron_upper_hg*4 # high/medium gain ratio = 4\n",
+    "ADU_to_electron_lower_mg = ADU_to_electron_lower_hg*4\n",
+    "ADU_to_electron_mg = (ADU_to_electron_upper_mg + ADU_to_electron_lower_mg)/2 # Average of ADU_to_electron for entire CCD\n",
+    "                                                                             # for medium gain\n",
+    "    \n",
+    "ADU_to_electron_upper_lg = ADU_to_electron_upper_hg*8 # high/medium gain ratio = 8\n",
+    "ADU_to_electron_lower_lg = ADU_to_electron_lower_hg*8\n",
+    "ADU_to_electron_lg = (ADU_to_electron_upper_lg + ADU_to_electron_lower_lg)/2 # Average of ADU_to_electron for entire CCD\n",
+    "                                                                             # for low gain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for gain, value in gain_dict.items():  \n",
+    "    \n",
+    "    if det_gain == gain_dict[\"low gain\"]:\n",
+    "        ADU_to_electron = ADU_to_electron_lg\n",
+    "        ADU_to_electron_upper = ADU_to_electron_upper_lg\n",
+    "        ADU_to_electron_lower = ADU_to_electron_lower_lg\n",
+    "        \n",
+    "    elif det_gain == gain_dict[\"medium gain\"]:\n",
+    "        ADU_to_electron = ADU_to_electron_mg\n",
+    "        ADU_to_electron_upper = ADU_to_electron_upper_mg\n",
+    "        ADU_to_electron_lower = ADU_to_electron_lower_mg\n",
+    "        \n",
+    "    else: # Here, we assume the auto gain and high gain conversions from ADU to electrons are the same.\n",
+    "        ADU_to_electron = ADU_to_electron_hg\n",
+    "        ADU_to_electron_upper = ADU_to_electron_upper_hg\n",
+    "        ADU_to_electron_lower = ADU_to_electron_lower_hg\n",
     "\n",
     "print(\"Abbreviations:\")\n",
-    "print(\" - ED = Entire Detector; LH: Lower Hemisphere; UH: Upper Hemisphere\")\n",
-    "print(\" - CM Noise: Common Mode Corrected Noise\")\n",
+    "print(\" - ED = Entire Detector;\\n - LH: Lower Hemisphere;\\n - UH: Upper Hemisphere;\")\n",
+    "print(\" - CM Noise: Common Mode Corrected Noise;\")\n",
     "print(\" - BP: Bad Pixels\\n\")\n",
     "      \n",
     "t0 = PrettyTable()\n",
     "t0.title = \"Averages of Noise per Pixel\"\n",
-    "t0.field_names = [\"Uncorrected Noise\",\"CM Noise, BP Incl.\", \"CM Noise, BP Excl.\"]\n",
-    "t0.add_row([\"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap),np.mean(noiseMap)*ADU_to_electron), \"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM), np.mean(noiseMapCM)*ADU_to_electron), \"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd), np.mean(noiseMapCM_2nd)*ADU_to_electron)])\n",
-    "t0.add_row([\"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap[:x//2,:]), np.mean(noiseMap[:x//2,:])*ADU_to_electron_lower), \"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM[:x//2,:]), np.mean(noiseMapCM[:x//2,:])*ADU_to_electron_lower), \"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd[:x//2,:]), np.mean(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])\n",
-    "t0.add_row([\"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap[x//2:,:]), np.mean(noiseMap[x//2:,:])*ADU_to_electron_upper), \"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM[x//2:,:]), np.mean(noiseMapCM[x//2:,:])*ADU_to_electron_upper), \"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd[x//2:,:]), np.mean(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])\n",
+    "t0.field_names = [\"Uncorrected Noise\", \"CM Noise, BP Incl.\", \"CM Noise, BP Excl.\"]\n",
+    "t0.add_row([\"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap), np.mean(noiseMap)*ADU_to_electron), \n",
+    "            \"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM), np.mean(noiseMapCM)*ADU_to_electron), \n",
+    "            \"ED: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd), np.mean(noiseMapCM_2nd)*ADU_to_electron)])\n",
+    "t0.add_row([\"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap[:x//2,:]), \n",
+    "                                                  np.mean(noiseMap[:x//2,:])*ADU_to_electron_lower), \n",
+    "            \"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM[:x//2,:]), \n",
+    "                                                  np.mean(noiseMapCM[:x//2,:])*ADU_to_electron_lower), \n",
+    "            \"LH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd[:x//2,:]), \n",
+    "                                                  np.mean(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])\n",
+    "t0.add_row([\"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMap[x//2:,:]), \n",
+    "                                                  np.mean(noiseMap[x//2:,:])*ADU_to_electron_upper), \n",
+    "            \"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM[x//2:,:]), \n",
+    "                                                  np.mean(noiseMapCM[x//2:,:])*ADU_to_electron_upper), \n",
+    "            \"UH: {:0.2f} ADU = {:0.2f} e-\".format(np.mean(noiseMapCM_2nd[x//2:,:]), \n",
+    "                                                  np.mean(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])\n",
     "print(t0,'\\n')\n",
     "\n",
     "t1 = PrettyTable()\n",
     "t1.title = \"Standard Deviations of Noise per Pixel\"\n",
-    "t1.field_names = [\"Uncorrected Noise\",\"CM Noise, BP Incl.\", \"CM Noise, BP Excl.\"]\n",
-    "t1.add_row([\"ED: {:0.2f} e-\".format(np.std(noiseMap)*ADU_to_electron), \"ED: {:0.2f} e-\".format(np.std(noiseMapCM)*ADU_to_electron), \"ED: {:0.2f} e-\".format(np.std(noiseMapCM_2nd)*ADU_to_electron)])\n",
-    "t1.add_row([\"LH: {:0.2f} e-\".format(np.std(noiseMap[:x//2,:])*ADU_to_electron_lower), \"LH: {:0.2f} e-\".format(np.std(noiseMapCM[:x//2,:])*ADU_to_electron_lower), \"LH: {:0.2f} e-\".format(np.std(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])\n",
-    "t1.add_row([\"UH: {:0.2f} e-\".format(np.std(noiseMap[x//2:,:])*ADU_to_electron_upper), \"UH: {:0.2f} e-\".format(np.std(noiseMapCM[x//2:,:])*ADU_to_electron_upper), \"UH: {:0.2f} e-\".format(np.std(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])\n",
+    "t1.field_names = [\"Uncorrected Noise\", \"CM Noise, BP Incl.\", \"CM Noise, BP Excl.\"]\n",
+    "t1.add_row([\"ED: {:0.2f} e-\".format(np.std(noiseMap)*ADU_to_electron), \n",
+    "            \"ED: {:0.2f} e-\".format(np.std(noiseMapCM)*ADU_to_electron), \n",
+    "            \"ED: {:0.2f} e-\".format(np.std(noiseMapCM_2nd)*ADU_to_electron)])\n",
+    "t1.add_row([\"LH: {:0.2f} e-\".format(np.std(noiseMap[:x//2,:])*ADU_to_electron_lower), \n",
+    "            \"LH: {:0.2f} e-\".format(np.std(noiseMapCM[:x//2,:])*ADU_to_electron_lower), \n",
+    "            \"LH: {:0.2f} e-\".format(np.std(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])\n",
+    "t1.add_row([\"UH: {:0.2f} e-\".format(np.std(noiseMap[x//2:,:])*ADU_to_electron_upper), \n",
+    "            \"UH: {:0.2f} e-\".format(np.std(noiseMapCM[x//2:,:])*ADU_to_electron_upper), \n",
+    "            \"UH: {:0.2f} e-\".format(np.std(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])\n",
     "print(t1)"
    ]
   },
@@ -961,10 +1112,11 @@
     "    \n",
     "    if db_output:\n",
     "        metadata.calibration_constant_version.raw_data_location = file_loc\n",
-    "        metadata.send(cal_db_interface, timeout=cal_db_timeout)    \n",
-    "\n",
+    "        metadata.send(cal_db_interface, timeout=cal_db_timeout)  \n",
+    "        \n",
     "print(\"Calibration constants (offsetMap, noiseMapCM_2nd and bad_pixels) are sent to the calibration database.\")\n",
-    "print(\"Creation time is: {}\".format(creation_time))"
+    "print(\"Creation time is: {}\".format(creation_time))\n",
+    "print(\"Raw data location is: {}\".format(file_loc))"
    ]
   },
   {