diff --git a/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb b/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
index 37fabb9cff9e77870ba9e701ba31222db15e5ca4..e6895334c7c465a16194f9e943f5ffe6e6bbdd28 100644
--- a/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
+++ b/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
@@ -18,16 +18,15 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T15:54:23.218849Z",
      "start_time": "2018-12-06T15:54:23.166497Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/SCS/201802/p002170/raw/\" # input folder, required\n",
-    "out_folder = '/gpfs/exfel/data/scratch/xcal/test/' # output folder, required\n",
+    "in_folder = \"/gpfs/exfel/exp/SCS/201930/p900074/raw\" # input folder, required\n",
+    "out_folder = '/gpfs/exfel/data/scratch/karnem/test/fastccd' # output folder, required\n",
     "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' #  path template in hdf5 file\n",
     "path_inset = 'DA05'\n",
-    "run = 277 # run number\n",
+    "run = 453 # run number\n",
     "h5path = '/INSTRUMENT/SCS_CDIDET_FCCD2M/DAQ/FCCD:daqOutput/data/image' # path in HDF5 file\n",
     "h5path_t = '/CONTROL/SCS_CDIDET_FCCD2M/CTRL/LSLAN/inputA/crdg/value' # temperature path in HDF5 file\n",
     "h5path_cntrl = '/RUN/SCS_CDIDET_FCCD2M/DET/FCCD'  # path to control data\n",
@@ -42,16 +41,23 @@
     "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "chunk_size_idim = 1 # H5 chunking size of output data\n",
     "overwrite = True # overwrite existing files\n",
-    "do_pattern_classification = True # classify split events\n",
     "sequences_per_node = 1 # sequences to correct per node\n",
     "limit_images = 0 # limit images per file \n",
-    "correct_offset_drift = False  # correct for offset drifts\n",
     "use_dir_creation_date = True # use dir creation data for calDB queries\n",
     "time_offset_days = 0 # offset in days for calibration parameters\n",
-    "photon_energy_gain_map = 2. # energy in keV\n",
+    "photon_energy_gain_map = 5.9 # energy in keV\n",
     "fix_temperature = 0. # fix temperature to this value, set to 0 to use slow control value\n",
     "flipped_between = [\"2019-02-01\", \"2019-04-02\"] # detector was flipped during this timespan\n",
     "temp_limits = 5 # limits within which temperature is considered the same\n",
+    "commonModeAxis = 1 # Axis along which common mode will be calculated (0: along rows, 1: along columns)\n",
+    "\n",
+    "# Correction Booleans\n",
+    "only_offset = False # Only apply offset correction\n",
+    "cti_corr = False # Apply CTI correction\n",
+    "relgain_corr = True # Apply relative gain correction\n",
+    "common_mode_corr = False # Apply commonMode correction\n",
+    "correct_offset_drift = False  # correct for offset drifts\n",
+    "do_pattern_classification = True # classify split events\n",
     "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node, path_inset):\n",
     "    import glob\n",
@@ -74,6 +80,38 @@
     "        return sequences"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Fill dictionaries comprising bools and arguments for correction and data analysis\n",
+    "\n",
+    "# Here the herarichy and dependability for correction booleans are defined \n",
+    "corr_bools = {}\n",
+    "\n",
+    "# offset is at the bottom of AGIPD correction pyramid.\n",
+    "corr_bools[\"only_offset\"] = only_offset\n",
+    "\n",
+    "# Dont apply any corrections if only_offset is requested \n",
+    "if not only_offset:\n",
+    "    \n",
+    "    # Apply relative gain correction, only if requested\n",
+    "    if relgain_corr:\n",
+    "        corr_bools[\"relgain\"] = relgain_corr\n",
+    "    \n",
+    "        # Apply CTI correction, only if requested\n",
+    "        if cti_corr:\n",
+    "            corr_bools[\"cti\"] = cti_corr\n",
+    "        \n",
+    "    corr_bools[\"common_mode\"] = common_mode_corr\n",
+    "    corr_bools[\"offset_drift\"] = correct_offset_drift\n",
+    "    corr_bools[\"pattern_class\"] = do_pattern_classification\n",
+    "# Here the herarichy and dependability for data analysis booleans and arguments are defined \n",
+    "data_analysis_parms = {}"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -115,7 +153,9 @@
     "\n",
     "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
     "from iCalibrationDB.detectors import DetectorTypes\n",
-    "from cal_tools.tools import get_dir_creation_date\n",
+    "from cal_tools.tools import (get_dir_creation_date,\n",
+    "                             get_random_db_interface,\n",
+    "                             get_constant_from_db_and_time)\n",
     "\n",
     "from datetime import timedelta\n",
     "\n",
@@ -123,15 +163,9 @@
     "\n",
     "if sequences[0] == -1:\n",
     "    sequences = None\n",
-    "    \n",
-    "offset_correction_args = (0.2459991787617141, 243.21639920846485)\n",
-    "t_base = 247.82\n",
-    "\n",
-    "if \"#\" in cal_db_interface:\n",
-    "    prot, serv, ran = cal_db_interface.split(\":\")\n",
-    "    r1, r2 = ran.split(\"#\")\n",
-    "    cal_db_interface = \":\".join(\n",
-    "        [prot, serv, str(np.random.randint(int(r1), int(r2)))])"
+    "\n",
+    "# select a random port for the data base \n",
+    "cal_db_interface = get_random_db_interface(cal_db_interface)\n"
    ]
   },
   {
@@ -186,22 +220,21 @@
     "\n",
     "sensorSize = [x, y]\n",
     "chunkSize = 100 #Number of images to read per chunk\n",
-    "blockSize = [sensorSize[0]//2, sensorSize[1]//4] #Sensor area will be analysed according to blocksize\n",
+    "blockSize = [sensorSize[0]//2, sensorSize[1]] #Sensor area will be analysed according to blocksize\n",
     "xcal.defaultBlockSize = blockSize\n",
     "memoryCells = 1 #FastCCD has 1 memory cell\n",
     "#Specifies total number of images to proceed\n",
     "\n",
     "commonModeBlockSize = blockSize\n",
-    "commonModeAxisR = 'row'#Axis along which common mode will be calculated\n",
+    "# commonModeAxisR = 'row'#Axis along which common mode will be calculated\n",
     "run_parallel = True\n",
     "profile = False\n",
     "\n",
-    "temperature_k = 291\n",
     "filename = fp_path.format(sequences[0] if sequences else 0)\n",
     "with h5py.File(filename, 'r') as f:\n",
     "    bias_voltage = int(f['{}/biasclock/bias/value'.format(h5path_cntrl)][0])\n",
     "    det_gain = int(f['{}/exposure/gain/value'.format(h5path_cntrl)][0])\n",
-    "    integration_time = int(f['{}/acquisitionTime/value'.format(h5path_cntrl)][0])\n",
+    "    integration_time = int(f['{}/exposure/exposure_time/value'.format(h5path_cntrl)][0])\n",
     "    print(\"Bias voltage is {} V\".format(bias_voltage))\n",
     "    print(\"Detector gain is set to x{}\".format(det_gain))\n",
     "    print(\"Detector integration time is set to {}\".format(integration_time))\n",
@@ -216,7 +249,8 @@
     "if not os.path.exists(out_folder):\n",
     "    os.makedirs(out_folder)\n",
     "elif not overwrite:\n",
-    "    raise AttributeError(\"Output path exists! Exiting\")    \n"
+    "    # Stop Notebook not only this cell\n",
+    "    raise SystemExit(\"Output path exists! Exiting\")    \n"
    ]
   },
   {
@@ -226,8 +260,7 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T15:54:24.088948Z",
      "start_time": "2018-12-06T15:54:24.059925Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -303,46 +336,52 @@
     "offsetMap = None\n",
     "badPixelMap = None\n",
     "noiseMap = None\n",
+    "\n",
+    "\n",
+    "# The following constants are saved in data base as a constant for each gain\n",
+    "# Hence, a loop over all 3 gains is performed\n",
+    "\n",
+    "# This is to be used in messages in reports.\n",
+    "g_name = {8: \"High gain\", 2: \"Medium gain\", 1: \"Low gain\"}\n",
+    "\n",
     "for i, g in enumerate([8, 2, 1]):\n",
-    "    ## offset\n",
-    "    metadata = ConstantMetaData()\n",
-    "    offset = Constants.CCD(DetectorTypes.fastCCD).Offset()\n",
-    "    metadata.calibration_constant = offset\n",
     "\n",
+    "    print(\"Retrieving constants for {}\".format(g_name[g]))\n",
+    "    \n",
     "    # set the operating condition\n",
     "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
     "                                    integration_time=integration_time,\n",
     "                                    gain_setting=g,\n",
     "                                    temperature=temperature_k,\n",
-    "                                    pixels_x=1934,\n",
-    "                                    pixels_y=960)\n",
+    "                                    pixels_x=x,\n",
+    "                                    pixels_y=y)\n",
     "\n",
     "    for parm in condition.parameters:\n",
     "        if parm.name == \"Sensor Temperature\":\n",
     "            parm.lower_deviation = temp_limits\n",
     "            parm.upper_deviation = temp_limits\n",
     "\n",
+    "            \n",
+    "    offset = Constants.CCD(DetectorTypes.fastCCD).Offset()\n",
+    "    noise = Constants.CCD(DetectorTypes.fastCCD).Noise()\n",
+    "    bpix = Constants.CCD(DetectorTypes.fastCCD).BadPixelsDark()\n",
+    "    \n",
+    "    ## retrieve_offset\n",
+    "    offset, offset_time = get_constant_from_db_and_time(device=Detectors.fastCCD1,\n",
+    "                                                        constant=offset,\n",
+    "                                                        condition=condition,\n",
+    "                                                        empty_constant=None,\n",
+    "                                                        cal_db_interface=cal_db_interface, \n",
+    "                                                        creation_time=creation_time,\n",
+    "                                                        timeout=cal_db_timeout, print_once=False)\n",
     "\n",
-    "    device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "    metadata.detector_condition = condition\n",
-    "\n",
-    "\n",
+    "    if offsetMap is None:\n",
+    "        offsetMap = np.zeros(list(offset.shape)+[3], np.float32)\n",
     "\n",
-    "    # specify the version for this constant\n",
-    "    if creation_time is None:\n",
-    "        metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "        metadata.retrieve(cal_db_interface)\n",
+    "    if offset is not None:\n",
+    "        offsetMap[...,i] = offset\n",
     "    else:\n",
-    "        metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
-    "                                                                  start=creation_time)\n",
-    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)\n",
-    "\n",
-    "\n",
-    "    if offsetMap is None:\n",
-    "        offsetMap = np.zeros(list(offset.data.shape)+[3], np.float32)\n",
-    "    offsetMap[...,i] = offset.data\n",
+    "        print(\"NO OFFSET FOUND IN DB!\")\n",
     "\n",
     "    offset_temperature = None\n",
     "    for parm in condition.parameters:\n",
@@ -350,87 +389,45 @@
     "        if parm.name == \"Sensor Temperature\":\n",
     "            offset_temperature = parm.value\n",
     "\n",
-    "    print(\"Temperature of detector when dark images (gain {}) for offset calculation \".format(g) +\n",
-    "              \"were taken at: {:0.2f} K @ {}\".format(offset_temperature,\n",
-    "                                                     metadata.calibration_constant_version.begin_at))\n",
-    "\n",
-    "    ## noise\n",
-    "    metadata = ConstantMetaData()\n",
-    "    noise = Constants.CCD(DetectorTypes.fastCCD).Noise()\n",
-    "    metadata.calibration_constant = noise\n",
-    "\n",
-    "    # set the operating condition\n",
-    "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                    integration_time=integration_time,\n",
-    "                                    gain_setting=g,\n",
-    "                                    temperature=temperature_k,\n",
-    "                                    pixels_x=1934,\n",
-    "                                    pixels_y=960)\n",
-    "\n",
-    "\n",
-    "    for parm in condition.parameters:\n",
-    "        if parm.name == \"Sensor Temperature\":\n",
-    "            parm.lower_deviation = temp_limits\n",
-    "            parm.upper_deviation = temp_limits\n",
-    "\n",
-    "\n",
-    "    device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "    metadata.detector_condition = condition\n",
-    "\n",
-    "    # specify the version for this constant\n",
-    "    if creation_time is None:\n",
-    "        metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "        metadata.retrieve(cal_db_interface)\n",
-    "    else:\n",
-    "        metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
-    "                                                                  start=creation_time)\n",
-    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)\n",
-    "\n",
+    "    print(\"Dark Offset was taken at temperature of {:0.2f}K at {}\"\n",
+    "          .format(offset_temperature, offset_time))\n",
+    "\n",
+    "    ## retrieve_noise\n",
+    "    noise, noise_time = get_constant_from_db_and_time(device=Detectors.fastCCD1,\n",
+    "                                                        constant=noise,\n",
+    "                                                        condition=condition,\n",
+    "                                                        empty_constant=None,\n",
+    "                                                        cal_db_interface=cal_db_interface, \n",
+    "                                                        creation_time=creation_time,\n",
+    "                                                        timeout=cal_db_timeout, print_once=False)\n",
     "    if noiseMap is None:\n",
-    "        noiseMap = np.zeros(list(noise.data.shape)+[3], np.float32)\n",
-    "    noiseMap[...,i] = noise.data\n",
-    "\n",
-    "\n",
-    "    ## bad pixels \n",
-    "\n",
-    "    metadata = ConstantMetaData()\n",
-    "    bpix = Constants.CCD(DetectorTypes.fastCCD).BadPixelsDark()\n",
-    "    metadata.calibration_constant = bpix\n",
-    "\n",
-    "    # set the operating condition\n",
-    "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                    integration_time=integration_time,\n",
-    "                                    gain_setting=g,\n",
-    "                                    temperature=temperature_k,\n",
-    "                                    pixels_x=1934,\n",
-    "                                    pixels_y=960)\n",
-    "\n",
-    "    for parm in condition.parameters:\n",
-    "        if parm.name == \"Sensor Temperature\":\n",
-    "            parm.lower_deviation = temp_limits\n",
-    "            parm.upper_deviation = temp_limits\n",
-    "\n",
-    "\n",
-    "    device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "    metadata.detector_condition = condition\n",
-    "\n",
-    "    # specify the version for this constant\n",
-    "    if creation_time is None:\n",
-    "        metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "        metadata.retrieve(cal_db_interface)\n",
+    "        noiseMap = np.zeros(list(noise.shape)+[3], np.float32)\n",
+    "    if noise is not None:\n",
+    "        noiseMap[...,i] = noise\n",
     "    else:\n",
-    "        metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
-    "                                                                  start=creation_time)\n",
-    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)\n",
-    "\n",
+    "        print(\"NO NOISE FOUND IN DB!\")\n",
+    "\n",
+    "    print(\"Noise at {} was taken at {}\"\n",
+    "          .format(g_name[g], noise_time))\n",
+    "\n",
+    "    ## retrieve_bad pixels\n",
+    "    bpix, bpix_time = get_constant_from_db_and_time(device=Detectors.fastCCD1,\n",
+    "                                                        constant=bpix,\n",
+    "                                                        condition=condition,\n",
+    "                                                        empty_constant=None,\n",
+    "                                                        cal_db_interface=cal_db_interface, \n",
+    "                                                        creation_time=creation_time,\n",
+    "                                                        timeout=cal_db_timeout, print_once=False)\n",
     "    if badPixelMap is None:\n",
-    "        badPixelMap = np.zeros(list(bpix.data.shape)+[3], np.uint32)\n",
-    "    badPixelMap[...,i] = bpix.data\n",
-    "        \n"
+    "        badPixelMap = np.zeros(list(bpix.shape)+[3], np.uint32)\n",
+    "        print(\"NO BadPixel FOUND IN DB!\")\n",
+    "    if bpix is not None:\n",
+    "        badPixelMap[...,i] = noise\n",
+    "    else:\n",
+    "        print(\"NO BADPIXEL FOUND IN DB!\")\n",
+    "\n",
+    "    print(\"BadPixes at {} was taken at {}\"\n",
+    "          .format(g_name[g], bpix_time))"
    ]
   },
   {
@@ -447,106 +444,116 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T15:54:28.343869Z",
      "start_time": "2018-12-06T15:54:28.271344Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "## relative gain\n",
-    "\n",
-    "metadata = ConstantMetaData()\n",
-    "relgain = Constants.CCD(DetectorTypes.fastCCD).RelativeGain()\n",
-    "metadata.calibration_constant = relgain\n",
+    "# relative gain\n",
+    "if corr_bools.get('relgain'):\n",
     "\n",
-    "# set the operating condition\n",
-    "condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
-    "                                       integration_time=integration_time,\n",
-    "                                       gain_setting=0,\n",
-    "                                       temperature=temperature_k,\n",
-    "                                       pixels_x=1934,\n",
-    "                                       pixels_y=960, photon_energy=photon_energy_gain_map)\n",
-    "device = Detectors.fastCCD1\n",
+    "    relgain = Constants.CCD(DetectorTypes.fastCCD).RelativeGain()\n",
     "\n",
-    "\n",
-    "metadata.detector_condition = condition\n",
-    "\n",
-    "# specify the a version for this constant\n",
-    "metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "metadata.retrieve(cal_db_interface)\n",
-    "\n",
-    "relGain = relgain.data[::-1,...]\n"
+    "    # set the operating condition\n",
+    "    condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
+    "                                           integration_time=integration_time,\n",
+    "                                           gain_setting=det_gain,\n",
+    "                                           temperature=temperature_k,\n",
+    "                                           pixels_x=x, pixels_y=y,\n",
+    "                                           photon_energy=photon_energy_gain_map)\n",
+    "\n",
+    "    relgain, relgain_time = get_constant_from_db_and_time(device=Detectors.fastCCD1,\n",
+    "                                                          constant=relgain,\n",
+    "                                                          condition=condition,\n",
+    "                                                          empty_constant=None,\n",
+    "                                                          cal_db_interface=cal_db_interface,\n",
+    "                                                          creation_time=creation_time,\n",
+    "                                                          timeout=cal_db_timeout, print_once=False)\n",
+    "\n",
+    "    # TODO: CHECK IF THIS FLIPPING IS CORRECT\n",
+    "    if relgain is None:\n",
+    "        corr_bools[\"relgain\"] = False\n",
+    "        print('Relative Gain was not found. Proceed without Relative Gain correction.')\n",
+    "    else:\n",
+    "        relGain = relgain[::-1, ...]\n",
+    "        print('Relative Gain was taken with creation-date:', relgain_time)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "relGainCA = copy.copy(relGain)\n",
-    "relGainC = relGainCA[:relGainCA.shape[0]//2,...]\n",
-    "ctiA = np.ones(relGainCA.shape[:2])\n",
-    "cti = np.ones(relGainC.shape[:2])\n",
-    "i = 0\n",
-    "idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
-    "mn1 = np.nanmean(relGainC[i, ~idx, 0])\n",
-    "\n",
-    "for i in range(1, relGainC.shape[0]):\n",
-    "    idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
-    "    mn2 = np.nanmean(relGainC[i, ~idx, 0])\n",
-    "    cti[i,:] = mn2/mn1\n",
-    "ctiA[:relGainCA.shape[0]//2,...] = cti\n",
-    "\n",
-    "relGainC = relGainCA[relGainCA.shape[0]//2:,...]\n",
-    "\n",
-    "\n",
-    "cti = np.ones(relGainC.shape[:2])\n",
-    "i = -1\n",
-    "idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
-    "mn1 = np.nanmean(relGainC[i, ~idx, 0])\n",
-    "\n",
-    "for i in range(relGainC.shape[0]-1, 1, -1):\n",
-    "    idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
-    "    mn2 = np.nanmean(relGainC[i, ~idx, 0])\n",
-    "    cti[i,:] = mn2/mn1\n",
-    "\n",
-    "ctiA[relGainCA.shape[0]//2:,...] = cti\n",
-    "\n",
-    "relGainCA = copy.copy(relGain)\n",
-    "relGainC = relGainCA[:relGainCA.shape[0]//2,...]\n",
-    "for i in range(relGainC.shape[1]):\n",
-    "    idx = (relGainC[:,i, 0] < 0.95) | (relGainC[:,i,0] > 1.05)\n",
-    "    relGainC[idx,i,0] = np.nanmean(relGainC[~idx,i,0])\n",
-    "    relGainC[idx,i,1] = np.nanmean(relGainC[~idx,i,1])\n",
-    "    relGainC[idx,i,2] = np.nanmean(relGainC[~idx,i,2])\n",
-    "relGainCA[:relGainCA.shape[0]//2,...] = relGainC\n",
-    "relGainC = relGainCA[relGainCA.shape[0]//2:,...]\n",
-    "for i in range(relGainC.shape[1]):\n",
-    "    idx = (relGainC[:,i, 0] < 0.95) | (relGainC[:,i,0] > 1.05)\n",
-    "    relGainC[idx,i,0] = np.nanmean(relGainC[~idx,i,0])\n",
-    "    relGainC[idx,i,1] = np.nanmean(relGainC[~idx,i,1])\n",
-    "    relGainC[idx,i,2] = np.nanmean(relGainC[~idx,i,2])\n",
-    "relGainCA[relGainCA.shape[0]//2:,...] = relGainC\n",
-    "relGainC = relGainCA*ctiA[...,None]\n",
-    "\n",
-    "relGain = relGainC"
+    "if corr_bools.get('cti'):\n",
+    "    pass\n",
+    "    ## FASTCCD CTI CURRENTLY IS NOT SUPPORTED BY DET!\n",
+    "    \n",
+    "    # The code is left for tracing past algorithm for later\n",
+    "    # use during the development of the new CTI algorithm.\n",
+    "    # TODO: Proper CTI Retrieval from CTI and the correction\n",
+    "    # in following cells\n",
+    "    \n",
+    "    \n",
+    "#     relGainCA = copy.copy(relGain)\n",
+    "#     relGainC = relGainCA[:relGainCA.shape[0]//2,...]\n",
+    "#     ctiA = np.ones(relGainCA.shape[:2])\n",
+    "#     cti = np.ones(relGainC.shape[:2])\n",
+    "#     i = 0\n",
+    "#     idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
+    "#     mn1 = np.nanmean(relGainC[i, ~idx, 0])\n",
+    "\n",
+    "#     for i in range(1, relGainC.shape[0]):\n",
+    "#         idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
+    "#         mn2 = np.nanmean(relGainC[i, ~idx, 0])\n",
+    "#         cti[i,:] = mn2/mn1\n",
+    "#     ctiA[:relGainCA.shape[0]//2,...] = cti\n",
+    "\n",
+    "#     relGainC = relGainCA[relGainCA.shape[0]//2:,...]\n",
+    "\n",
+    "\n",
+    "#     cti = np.ones(relGainC.shape[:2])\n",
+    "#     i = -1\n",
+    "#     idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
+    "#     mn1 = np.nanmean(relGainC[i, ~idx, 0])\n",
+    "\n",
+    "#     for i in range(relGainC.shape[0]-1, 1, -1):\n",
+    "#         idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
+    "#         mn2 = np.nanmean(relGainC[i, ~idx, 0])\n",
+    "#         cti[i,:] = mn2/mn1\n",
+    "\n",
+    "#     ctiA[relGainCA.shape[0]//2:,...] = cti\n",
+    "\n",
+    "#     relGainCA = copy.copy(relGain)\n",
+    "#     relGainC = relGainCA[:relGainCA.shape[0]//2,...]\n",
+    "#     for i in range(relGainC.shape[1]):\n",
+    "#         idx = (relGainC[:,i, 0] < 0.95) | (relGainC[:,i,0] > 1.05)\n",
+    "#         relGainC[idx,i,0] = np.nanmean(relGainC[~idx,i,0])\n",
+    "#         relGainC[idx,i,1] = np.nanmean(relGainC[~idx,i,1])\n",
+    "#         relGainC[idx,i,2] = np.nanmean(relGainC[~idx,i,2])\n",
+    "#     relGainCA[:relGainCA.shape[0]//2,...] = relGainC\n",
+    "#     relGainC = relGainCA[relGainCA.shape[0]//2:,...]\n",
+    "#     for i in range(relGainC.shape[1]):\n",
+    "#         idx = (relGainC[:,i, 0] < 0.95) | (relGainC[:,i,0] > 1.05)\n",
+    "#         relGainC[idx,i,0] = np.nanmean(relGainC[~idx,i,0])\n",
+    "#         relGainC[idx,i,1] = np.nanmean(relGainC[~idx,i,1])\n",
+    "#         relGainC[idx,i,2] = np.nanmean(relGainC[~idx,i,2])\n",
+    "#     relGainCA[relGainCA.shape[0]//2:,...] = relGainC\n",
+    "#     relGainC = relGainCA*ctiA[...,None]\n",
+    "\n",
+    "#     relGain = relGainC"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import dateutil.parser\n",
     "flipped_between = [dateutil.parser.parse(d) for d in flipped_between]\n",
-    "flip_rgain = creation_time >= flipped_between[0] and creation_time <= flipped_between[1]\n",
-    "flip_rgain &= (metadata.calibration_constant_version.begin_at.replace(tzinfo=None) >= flipped_between[0] \n",
-    "               and metadata.calibration_constant_version.begin_at.replace(tzinfo=None) <= flipped_between[1])\n",
+    "flip_rgain = creation_time.replace(tzinfo=None) >= flipped_between[0] and creation_time.replace(tzinfo=None) <= flipped_between[1]\n",
+    "flip_rgain &= (relgain_time.replace(tzinfo=None) >= flipped_between[0] \n",
+    "               and relgain_time.replace(tzinfo=None) <= flipped_between[1])\n",
     "print(\"Accounting for flipped detector: {}\".format(flip_rgain))\n"
    ]
   },
@@ -557,21 +564,20 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T15:54:28.771629Z",
      "start_time": "2018-12-06T15:54:28.346051Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
     "#************************Calculators************************#\n",
     "\n",
-    "\n",
-    "cmCorrection = xcal.CommonModeCorrection([x, y], \n",
-    "                                         commonModeBlockSize, \n",
-    "                                         commonModeAxisR,\n",
-    "                                         nCells = memoryCells, \n",
-    "                                         noiseMap = noiseMap,\n",
-    "                                        runParallel=True,\n",
-    "                                        stats=True)\n",
+    "if corr_bools.get('common_mode'):\n",
+    "    cmCorrection = xcal.CommonModeCorrection([x, y],\n",
+    "                                             commonModeBlockSize, \n",
+    "                                             commonModeAxis,\n",
+    "                                             nCells = memoryCells,\n",
+    "                                             stride=10,\n",
+    "                                             runParallel=True,\n",
+    "                                             stats=True, minFrac=0)\n",
     "\n",
     "patternClassifierLH = xcal.PatternClassifier([x//2, y], \n",
     "                                              noiseMap[:x//2, :], \n",
@@ -611,33 +617,39 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:08:51.886343Z",
      "start_time": "2018-12-06T16:08:51.842837Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
     "#*****************Histogram Calculators******************#\n",
     "\n",
     "histCalOffsetCor = xcal.HistogramCalculator([x, y], \n",
-    "                                             bins=500, \n",
+    "                                             bins=1050, \n",
     "                                             range=[-50, 1000],\n",
     "                                             nCells=memoryCells, \n",
     "                                             cores=cpuCores,\n",
     "                                             blockSize=blockSize)\n",
     "\n",
     "histCalPcorr = xcal.HistogramCalculator([x, y], \n",
-    "                                         bins=500, \n",
+    "                                         bins=1050, \n",
     "                                         range=[-50, 1000],\n",
     "                                         nCells=memoryCells, \n",
     "                                         cores=cpuCores,\n",
     "                                         blockSize=blockSize)\n",
     "\n",
     "histCalPcorrS = xcal.HistogramCalculator([x, y], \n",
-    "                                         bins=500, \n",
+    "                                         bins=1050, \n",
     "                                         range=[-50, 1000],\n",
     "                                         nCells=memoryCells, \n",
     "                                         cores=cpuCores,\n",
-    "                                         blockSize=blockSize)"
+    "                                         blockSize=blockSize)\n",
+    "\n",
+    "histCalCommonModeCor = xcal.HistogramCalculator([x, y], \n",
+    "                                             bins=1050, \n",
+    "                                             range=[-50, 1000],\n",
+    "                                             nCells=memoryCells, \n",
+    "                                             cores=cpuCores,\n",
+    "                                             blockSize=blockSize)"
    ]
   },
   {
@@ -654,8 +666,7 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:08:52.441784Z",
      "start_time": "2018-12-06T16:08:52.437284Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -672,13 +683,13 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:08:53.042555Z",
      "start_time": "2018-12-06T16:08:53.034522Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
     "histCalOffsetCor.debug()\n",
-    "histCalPcorr.debug()\n"
+    "histCalCommonModeCor.debug()\n",
+    "histCalPcorr.debug()"
    ]
   },
   {
@@ -688,12 +699,18 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:08:53.551111Z",
      "start_time": "2018-12-06T16:08:53.531064Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
     "def copy_and_sanitize_non_cal_data(infile, outfile, h5base):\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    \n",
+    "    :param infile: Input file \n",
+    "    :param outfile: Otput file\n",
+    "    :param h5base: \n",
+    "    \"\"\"\n",
     "    \n",
     "    if h5base.startswith(\"/\"):\n",
     "        h5base = h5base[1:]\n",
@@ -702,6 +719,12 @@
     "                for do in dont_copy]\n",
     "\n",
     "    def visitor(k, item):\n",
+    "        \"\"\"\n",
+    "    \n",
+    "    \n",
+    "        :param k:\n",
+    "        :param item:\n",
+    "        \"\"\"\n",
     "        if k not in dont_copy:\n",
     "            if isinstance(item, h5py.Group):\n",
     "                outfile.create_group(k)\n",
@@ -720,8 +743,7 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:10:55.917179Z",
      "start_time": "2018-12-06T16:09:01.603633Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -734,7 +756,13 @@
     "offsetMap = np.squeeze(offsetMap)\n",
     "noiseMap = np.squeeze(noiseMap)\n",
     "badPixelMap = np.squeeze(badPixelMap)\n",
-    "relGain = np.squeeze(relGain)\n",
+    "if corr_bools.get('relgain'):\n",
+    "    #TODO: This should be removed after properly injecting gain const for all 3 gains\n",
+    "    if len(relGain.shape) == 3 and relGain.shape[2] == 1:\n",
+    "        # This is a temporary solution for the relGain of one gain in db\n",
+    "        relGain = np.repeat(relGain, 3, axis=2)\n",
+    "    relGain = np.squeeze(relGain)\n",
+    "\n",
     "for k, f in enumerate(file_list):\n",
     "    with h5py.File(f, 'r', driver='core') as infile:\n",
     "        out_fileb = \"{}/{}\".format(out_folder, f.split(\"/\")[-1])\n",
@@ -769,39 +797,59 @@
     "                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
     "                                             dtype=np.uint8, compression=\"gzip\")\n",
     "                \n",
+    "                # Getting the 14th and 15th bit from the data, \n",
+    "                # which contains gain before removing them from the data.\n",
     "                gain = np.right_shift(data, 14)\n",
     "                \n",
+    "                # gains are stored as 00 for High gain, \n",
+    "                # 01 for Medium gain and 11 for low gain.\n",
+    "                # Hence, the subtraction from the \n",
+    "                # gain's int values to have 0, 1 and 2\n",
     "                gain[gain != 0] -= 1\n",
     "                \n",
     "                fstride = 1\n",
     "                if not flip_rgain:  # rgain was taken during flipped orientation\n",
     "                    fstride = -1\n",
     "                \n",
-    "                data = np.bitwise_and(data, 0b0011111111111111).astype(np.float32)                \n",
+    "                data = np.bitwise_and(data, 0b0011111111111111).astype(np.float32)\n",
+    "\n",
+    "                # creating maps for correction usage\n",
     "                omap = np.repeat(offsetMap[...,None,:], data.shape[2], axis=2)\n",
-    "                rmap = np.repeat(relGain[:,::fstride,None,:], data.shape[2], axis=2)\n",
     "                nmap = np.repeat(noiseMap[...,None,:], data.shape[2], axis=2)\n",
     "                bmap = np.repeat(badPixelMap[...,None,:], data.shape[2], axis=2)\n",
+    "\n",
+    "                # selecting element value related to the gain of the pixel.\n",
     "                offset = np.choose(gain, (omap[...,0], omap[...,1], omap[...,2]))\n",
-    "                rg = np.choose(gain, (rmap[...,0], rmap[...,1], rmap[...,2]))\n",
     "                noise = np.choose(gain, (nmap[...,0], nmap[...,1], nmap[...,2]))\n",
     "                bpix = np.choose(gain, (bmap[...,0], bmap[...,1], bmap[...,2]))\n",
-    "                \n",
+    "\n",
+    "                # same for relative gain if correction is available\n",
+    "                if corr_bools.get('relgain'):\n",
+    "                    rmap = np.repeat(relGain[:,::fstride,None,:], data.shape[2], axis=2)\n",
+    "                    rg = np.choose(gain, (rmap[...,0], rmap[...,1], rmap[...,2]))\n",
+    "\n",
+    "                # Apply offset correction\n",
     "                data -= offset\n",
-    "                data *= rg\n",
+    "\n",
+    "                if corr_bools.get('relgain'):\n",
+    "                    # Apply relative gain correction\n",
+    "                    # TODO: check relgain correction in pydetlib\n",
+    "                    # and use it here if the same.\n",
+    "                    data *= rg\n",
     "                \n",
-    "                if correct_offset_drift:\n",
+    "                if corr_bools.get(\"offset_drift\"):\n",
+    "                    # Put in consideration the temperature \n",
+    "                    # change of the FastCCD's hole.\n",
     "                    lhd = np.mean(data[x//2-10:x//2,y//2-5:y//2+5,:], axis=(0,1))\n",
     "                    data[:x//2, :, :] -= lhd\n",
     "                    drift_lh.append(lhd)\n",
     "                    \n",
     "                    uhd = np.mean(data[x//2:x//2+10,y//2-5:y//2+5,:], axis=(0,1))                    \n",
     "                    data[x//2:, :, :] -= uhd\n",
-    "                    drift_uh.append(lhd)\n",
+    "                    drift_uh.append(uhd)\n",
     "                \n",
     "                histCalOffsetCor.fill(data)\n",
     "\n",
-    "               \n",
     "                ddset[...] = np.moveaxis(data, 2, 0)\n",
     "                ddsetm[...] = np.moveaxis(bpix, 2, 0)\n",
     "                ddsetg[...] = np.moveaxis(gain, 2, 0).astype(np.uint8)\n",
@@ -810,7 +858,7 @@
     "                        mean_im = np.nanmean(data, axis=2)\n",
     "                        single_im = data[...,0]\n",
     "                        \n",
-    "                if do_pattern_classification:\n",
+    "                if corr_bools.get(\"pattern_class\"):\n",
     "                \n",
     "                    ddsetcm = ofile.create_dataset(h5path+\"/pixels_cm\",\n",
     "                                         oshape,\n",
@@ -827,12 +875,21 @@
     "                                                 chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
     "                                                 dtype=np.int32, compression=\"gzip\")\n",
     "                    \n",
-    "\n",
+    "                    # The calculation of the cluster map\n",
     "                    patternClassifierLH._noisemap = noise[:x//2, :, :]\n",
     "                    patternClassifierUH._noisemap = noise[x//2:, :, :]\n",
+    "                    \n",
+    "                    # common mode correction\n",
+    "                    if corr_bools.get(\"common_mode\"):\n",
+    "                        cellTable = np.zeros(data.shape[2], np.int32) # Common mode correction\n",
+    "                        data = cmCorrection.correct(data.astype(np.float32),\n",
+    "                                                    cellTable=cellTable,\n",
+    "                                                    noiseMap=noise) \n",
     "\n",
-    "                    data = cmCorrection.correct(data)  # correct for the row common mode\n",
-    "                    ddsetcm[...] = np.moveaxis(data, 2, 0)\n",
+    "                        # correct for the row common mode\n",
+    "                        ddsetcm[...] = np.moveaxis(data, 2, 0)\n",
+    "\n",
+    "                        histCalCommonModeCor.fill(data)\n",
     "\n",
     "                    dataLH = data[:x//2, :, :]\n",
     "                    dataUH = data[x//2:, :, :]\n",
@@ -870,12 +927,11 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:10:56.094985Z",
      "start_time": "2018-12-06T16:10:55.918900Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "if correct_offset_drift:\n",
+    "if corr_bools.get(\"offset_drift\"):\n",
     "    lhds = np.concatenate(drift_lh)\n",
     "    uhds = np.concatenate(drift_uh)\n",
     "    fig = plt.figure(figsize=(10,5))\n",
@@ -893,12 +949,11 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:10:56.126409Z",
      "start_time": "2018-12-06T16:10:56.096242Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "if do_pattern_classification:\n",
+    "if corr_bools.get(\"pattern_class\"):\n",
     "    print(\"******************LOWER HEMISPHERE******************\\n\")\n",
     "\n",
     "    patternStatsLH = patternClassifierLH.getPatternStats()\n",
@@ -950,12 +1005,13 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:10:56.176160Z",
      "start_time": "2018-12-06T16:10:56.127853Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "if do_pattern_classification:\n",
+    "if corr_bools.get(\"pattern_class\"):\n",
+    "    print(\"******************UPPER HEMISPHERE******************\\n\")\n",
+    "\n",
     "    patternStatsUH = patternClassifierUH.getPatternStats()\n",
     "    fig = plt.figure(figsize=(15,15))\n",
     "    ax = fig.add_subplot(4,4,1)\n",
@@ -995,9 +1051,7 @@
     "            pmap = ax.imshow(patternStatsUH[m][j], interpolation=\"nearest\", vmax=np.median(patternStatsUH[m][j]))\n",
     "            ax.set_title(m+\"(\"+str(j)+\")\")\n",
     "            cb = fig.colorbar(pmap)\n",
-    "            k+=1\n",
-    "\n",
-    "    print(\"******************UPPER HEMISPHERE******************\\n\")        "
+    "            k+=1\n"
    ]
   },
   {
@@ -1007,12 +1061,11 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:10:56.190150Z",
      "start_time": "2018-12-06T16:10:56.177570Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "if do_pattern_classification:\n",
+    "if corr_bools.get(\"pattern_class\"):\n",
     "    t0 = PrettyTable()\n",
     "    t0.title = \"Total number of Counts after all corrections\"\n",
     "    t0.field_names = [\"Hemisphere\",\"Singles\", \"First-singles\", \"Clusters\"]\n",
@@ -1040,12 +1093,11 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:10:56.203219Z",
      "start_time": "2018-12-06T16:10:56.191509Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "if do_pattern_classification:\n",
+    "if corr_bools.get(\"pattern_class\"):\n",
     "    doublesLH = patternStatsLH['doubles'][0] + patternStatsLH['doubles'][1] + patternStatsLH['doubles'][2] + patternStatsLH['doubles'][3]\n",
     "    triplesLH = patternStatsLH['triples'][0] + patternStatsLH['triples'][1] + patternStatsLH['triples'][2] + patternStatsLH['triples'][3]\n",
     "    quadsLH = patternStatsLH['quads'][0] + patternStatsLH['quads'][1] + patternStatsLH['quads'][2] + patternStatsLH['quads'][3]\n",
@@ -1069,12 +1121,11 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:10:56.212586Z",
      "start_time": "2018-12-06T16:10:56.204731Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "if do_pattern_classification:\n",
+    "if corr_bools.get(\"pattern_class\"):\n",
     "    fig = plt.figure(figsize=(10,5))\n",
     "    ax = fig.add_subplot(1,2,1)\n",
     "    labels = ['singles', 'doubles', 'triples', 'quads']\n",
@@ -1096,8 +1147,7 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:13:12.889583Z",
      "start_time": "2018-12-06T16:13:11.122653Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -1114,7 +1164,17 @@
     "     },\n",
     "    \n",
     "     ]\n",
-    "     \n",
+    "\n",
+    "if corr_bools.get(\"pattern_class\") and corr_bools.get(\"common_mode\"):\n",
+    "    hcm,ecm,ccm,scm = histCalCommonModeCor.get()\n",
+    "    d.append({'x': ccm,\n",
+    "              'y': hcm,\n",
+    "              'y_err': np.sqrt(hcm[:]),\n",
+    "              'drawstyle': 'steps-mid',\n",
+    "              'errorstyle': 'bars',\n",
+    "              'errorcoarsing': 2,\n",
+    "              'label': 'CommonMode corr.'\n",
+    "               })\n",
     "\n",
     "fig = xana.simplePlot(d, aspect=1, x_label='Energy(ADU)', \n",
     "                      y_label='Number of occurrences', figsize='2col',\n",
@@ -1129,12 +1189,11 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:12:57.289742Z",
      "start_time": "2018-12-06T16:12:45.529734Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "if do_pattern_classification:\n",
+    "if corr_bools.get(\"pattern_class\"):\n",
     "    h1,e1L,c1L,s1L = histCalPcorr.get()\n",
     "    h1s,e1Ls,c1Ls,s1Ls = histCalPcorrS.get()\n",
     "\n",
@@ -1173,8 +1232,7 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:11:08.317130Z",
      "start_time": "2018-12-06T16:11:05.788655Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -1184,7 +1242,7 @@
     "                       x_range=(0,y),\n",
     "                       y_range=(0,x), vmin=-50, vmax=500)\n",
     "\n",
-    "if do_pattern_classification:\n",
+    "if corr_bools.get(\"pattern_class\"):\n",
     "    fig = xana.heatmapPlot(mean_im_cc,\n",
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Signal (ADU)',\n",
@@ -1208,8 +1266,7 @@
     "ExecuteTime": {
      "end_time": "2018-12-06T16:11:10.908912Z",
      "start_time": "2018-12-06T16:11:08.318486Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -1219,7 +1276,7 @@
     "                       x_range=(0,y),\n",
     "                       y_range=(0,x), vmin=-50, vmax=500)\n",
     "\n",
-    "if do_pattern_classification:\n",
+    "if corr_bools.get(\"pattern_class\"):\n",
     "    fig = xana.heatmapPlot(single_im_cc,\n",
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Signal (ADU)',\n",
@@ -1230,9 +1287,21 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }