diff --git a/notebooks/ePix100/Correction_ePix100_NBC.ipynb b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
index 878edb18e2346902e08fe6d2d56be7176afff197..0c46331fffe0b4c4106fa2c0e5ccd85644711a05 100644
--- a/notebooks/ePix100/Correction_ePix100_NBC.ipynb
+++ b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
@@ -4,9 +4,9 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# ePIX Data Correction ##\n",
+    "# ePIX Data Correction\n",
     "\n",
-    "Authors: Q. Tian S. Hauf, Version 1.0\n",
+    "Authors: Q. Tian, S. Hauf, M. Cascella, Version 1.0\n",
     "\n",
     "The following notebook provides Offset correction of images acquired with the ePix100 detector."
    ]
@@ -49,6 +49,9 @@
     "photon_energy = 8.0  # Photon energy to calibrate in number of photons, 0 for calibration in keV\n",
     "\n",
     "relative_gain = False  # Apply relative gain correction.\n",
+    "common_mode = True  # Apply common mode correction.\n",
+    "cm_min_frac = 0.25  # No CM correction is performed if, after masking, the fraction of good pixels falls below this value.\n",
+    "cm_noise_sigma = 5.  # CM correction noise standard deviation.\n",
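+    "# Both CM parameters are forwarded to the three chained\n",
+    "# xcal.CommonModeCorrection passes (block, row, column) set up below.\n",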
     "\n",
     "split_evt_primary_threshold = 7.  # primary threshold for split event correction\n",
     "split_evt_secondary_threshold = 5.  # secondary threshold for split event correction\n",
@@ -105,8 +108,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# TODO: expose to first cell after fixing common mode correction.\n",
-    "common_mode = False  # Apply common mode correction.\n",
+    "# TODO: expose to first cell after fixing clustering.\n",
+    "pattern_classification = False  # Apply pattern classification (clustering).\n",
     "\n",
     "h5path = h5path.format(karabo_id, receiver_id)\n",
     "h5path_t = h5path_t.format(karabo_id, receiver_id)\n",
@@ -159,9 +162,11 @@
     "run_parallel = True\n",
     "\n",
     "# Read slow data from the first available sequence file.\n",
-    "filename = ped_dir / fp_name.format(sequences[0] if sequences[0] != -1 else 0)\n",
+    "filename = ped_dir / fp_name.format(\n",
+    "    sequences[0] if sequences[0] != -1 else 0)\n",
     "with h5py.File(filename, 'r') as f:\n",
-    "    integration_time = int(f[f\"{h5path_cntrl}/CONTROL/expTime/value\"][0])\n",
+    "    integration_time = int(\n",
+    "        f[f\"{h5path_cntrl}/CONTROL/expTime/value\"][0])\n",
     "    temperature = np.mean(f[h5path_t]) / 100.\n",
     "    temperature_k = temperature + 273.15\n",
     "    if fix_temperature != 0:\n",
@@ -194,7 +199,8 @@
     "# adapt seq_files list.\n",
     "if sequences != [-1]:\n",
     "    seq_files = [f for f in seq_files\n",
-    "                 if any(f.match(f\"*-S{s:05d}.h5\") for s in sequences)]\n",
+    "                 if any(\n",
+    "                     f.match(f\"*-S{s:05d}.h5\") for s in sequences)]\n",
     "\n",
     "print(f\"Processing a total of {len(seq_files)} sequence files\")"
    ]
@@ -291,7 +297,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# ************************Calculators************************ #\n",
+    "# ********************Calculators******************** #\n",
     "offsetCorrection = xcal.OffsetCorrection(\n",
     "    sensorSize,\n",
     "    const_data[\"Offset\"],\n",
@@ -357,20 +363,44 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# ************************Calculators************************ #\n",
+    "# ********************Calculators******************** #\n",
     "if common_mode:\n",
     "    commonModeBlockSize = [x//2, y//2]\n",
-    "    commonModeAxisR = 'row'\n",
-    "    cmCorrection = xcal.CommonModeCorrection(\n",
-    "        [x, y],\n",
-    "        commonModeBlockSize,\n",
-    "        commonModeAxisR,\n",
-    "        nCells=memoryCells,\n",
-    "        noiseMap=const_data[\"Noise\"],\n",
+    "    stats = True\n",
+    "\n",
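+    "    # The common mode is estimated and subtracted in three chained\n",
+    "    # passes (per block, per row, per column), all sharing the\n",
+    "    # cm_min_frac and cm_noise_sigma settings from the first cell.\n",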
+    "    cmCorrectionB = xcal.CommonModeCorrection(\n",
+    "        shape=sensorSize,\n",
+    "        blockSize=commonModeBlockSize,\n",
+    "        orientation='block',\n",
+    "        nCells=memoryCells,\n",
+    "        noiseMap=const_data[\"Noise\"],\n",
     "        runParallel=run_parallel,\n",
-    "        stats=True,\n",
+    "        stats=stats,\n",
+    "        minFrac=cm_min_frac,\n",
+    "        noiseSigma=cm_noise_sigma,\n",
+    "    )\n",
+    "    cmCorrectionR = xcal.CommonModeCorrection(\n",
+    "        shape=sensorSize,\n",
+    "        blockSize=commonModeBlockSize,\n",
+    "        orientation='row',\n",
+    "        nCells=memoryCells,\n",
+    "        noiseMap=const_data[\"Noise\"],\n",
+    "        runParallel=run_parallel,\n",
+    "        stats=stats,\n",
+    "        minFrac=cm_min_frac,\n",
+    "        noiseSigma=cm_noise_sigma,\n",
+    "    )\n",
+    "    cmCorrectionC = xcal.CommonModeCorrection(\n",
+    "        shape=sensorSize,\n",
+    "        blockSize=commonModeBlockSize,\n",
+    "        orientation='col',\n",
+    "        nCells=memoryCells,\n",
+    "        noiseMap=const_data[\"Noise\"],\n",
+    "        runParallel=run_parallel,\n",
+    "        stats=stats,\n",
+    "        minFrac=cm_min_frac,\n",
+    "        noiseSigma=cm_noise_sigma,\n",
+    "    )\n",
-    "\n",
     "    histCalCMCor = xcal.HistogramCalculator(\n",
     "        sensorSize,\n",
     "        bins=1050,\n",
@@ -379,31 +409,39 @@
     "        nCells=memoryCells,\n",
     "        cores=cpuCores,\n",
     "        blockSize=blockSize,\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
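+    "# Pattern classification groups events whose charge is split over\n",
+    "# neighbouring pixels, using the split_evt_* thresholds defined in\n",
+    "# the first cell.\n",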
+    "if pattern_classification:\n",
+    "    patternClassifier = xcal.PatternClassifier(\n",
+    "        [x, y],\n",
+    "        const_data[\"Noise\"],\n",
+    "        split_evt_primary_threshold,\n",
+    "        split_evt_secondary_threshold,\n",
+    "        split_evt_mip_threshold,\n",
+    "        tagFirstSingles=0,\n",
+    "        nCells=memoryCells,\n",
+    "        cores=cpuCores,\n",
+    "        allowElongated=False,\n",
+    "        blockSize=[x, y],\n",
+    "        runParallel=run_parallel,\n",
     "    )\n",
     "\n",
-    "patternClassifier = xcal.PatternClassifier(\n",
-    "    [x, y],\n",
-    "    const_data[\"Noise\"],\n",
-    "    split_evt_primary_threshold,\n",
-    "    split_evt_secondary_threshold,\n",
-    "    split_evt_mip_threshold,\n",
-    "    tagFirstSingles=0,\n",
-    "    nCells=memoryCells,\n",
-    "    cores=cpuCores,\n",
-    "    allowElongated=False,\n",
-    "    blockSize=[x, y],\n",
-    "    runParallel=run_parallel,\n",
-    ")\n",
-    "\n",
-    "histCalSECor = xcal.HistogramCalculator(\n",
-    "    sensorSize,\n",
-    "    bins=1050,\n",
-    "    range=[-50, 1000],\n",
-    "    parallel=run_parallel,\n",
-    "    nCells=memoryCells,\n",
-    "    cores=cpuCores,\n",
-    "    blockSize=blockSize,\n",
-    ")"
+    "    histCalSECor = xcal.HistogramCalculator(\n",
+    "        sensorSize,\n",
+    "        bins=1050,\n",
+    "        range=[-50, 1000],\n",
+    "        parallel=run_parallel,\n",
+    "        nCells=memoryCells,\n",
+    "        cores=cpuCores,\n",
+    "        blockSize=blockSize,\n",
+    "    )"
    ]
   },
   {
@@ -413,10 +451,14 @@
    "outputs": [],
    "source": [
     "if common_mode:\n",
-    "    cmCorrection.debug()\n",
+    "    cmCorrectionB.debug()\n",
+    "    cmCorrectionR.debug()\n",
+    "    cmCorrectionC.debug()\n",
     "    histCalCMCor.debug()\n",
-    "patternClassifier.debug()\n",
-    "histCalSECor.debug()"
+    "\n",
+    "if pattern_classification:\n",
+    "    patternClassifier.debug()\n",
+    "    histCalSECor.debug()"
    ]
   },
   {
@@ -458,11 +500,12 @@
     "for f in seq_files:\n",
     "    data = None\n",
     "    out_file = out_folder / f.name.replace(\"RAW\", \"CORR\")\n",
-    "    with h5py.File(f, \"r\") as infile, h5py.File(out_file, \"w\") as ofile:\n",
+    "    with h5py.File(f, \"r\") as infile, h5py.File(out_file, \"w\") as ofile:  # noqa\n",
     "        try:\n",
     "            copy_and_sanitize_non_cal_data(infile, ofile, h5path)\n",
     "            data = infile[h5path+\"/pixels\"][()]\n",
-    "            data = np.compress(np.any(data > 0, axis=(1, 2)), data, axis=0)\n",
+    "            data = np.compress(\n",
+    "                np.any(data > 0, axis=(1, 2)), data, axis=0)\n",
     "            if limit_images > 0:\n",
     "                data = data[:limit_images, ...]\n",
     "\n",
@@ -477,6 +520,17 @@
     "            # Offset correction.\n",
     "            data = offsetCorrection.correct(data.astype(np.float32))\n",
     "\n",
+    "            # Common mode correction.\n",
+    "            if common_mode:\n",
+    "                # Block CM\n",
+    "                data = cmCorrectionB.correct(data)\n",
+    "                # Row CM\n",
+    "                data = cmCorrectionR.correct(data)\n",
+    "                # Column CM\n",
+    "                data = cmCorrectionC.correct(data)\n",
+    "\n",
+    "                histCalCMCor.fill(data)\n",
+    "\n",
     "            # relative gain correction.\n",
     "            if relative_gain:\n",
     "                data = gainCorrection.correct(data.astype(np.float32))\n",
@@ -486,26 +540,22 @@
     "            histCalOffsetCor.fill(data)\n",
     "            ddset[...] = np.moveaxis(data, 2, 0)\n",
     "\n",
-    "            # Common mode correction\n",
-    "            # TODO: Fix conflict between common mode and relative gain.\n",
-    "\n",
-    "            \"\"\"The gain correction is currently applying an absolute correction\n",
-    "            (not a relative correction as the implied by the name);\n",
-    "            it changes the scale (the unit of measurement) of the data from ADU\n",
-    "            to either keV or n_of_photons. But the common mode correction\n",
-    "            relies on comparing data with the noise map, which is still in ADU.\n",
-    "\n",
-    "            The best solution is probably to do a relative gain correction first\n",
-    "            (correct) and apply the global absolute gain to the data at the end,\n",
-    "            after common mode and clustering.\n",
+    "            \"\"\"The gain correction currently applies an\n",
+    "            absolute correction (not a relative correction,\n",
+    "            as implied by the name): it changes the scale\n",
+    "            (the unit of measurement) of the data from ADU\n",
+    "            to either keV or number of photons. But the\n",
+    "            pattern classification relies on comparing the\n",
+    "            data with the noise map, which is still in ADU.\n",
+    "\n",
+    "            The best solution is to do a relative gain\n",
+    "            correction first and apply the global absolute\n",
+    "            gain to the data at the end, after clustering.\n",
     "            \"\"\"\n",
-    "            if common_mode:\n",
-    "                ddsetcm = ofile.create_dataset(\n",
-    "                    h5path+\"/pixels_cm\",\n",
-    "                    oshape,\n",
-    "                    chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
-    "                    dtype=np.float32)\n",
-    "\n",
+    "\n",
+    "            # TODO: Fix conflict between pattern\n",
+    "            # classification and gain correction.\n",
+    "            if pattern_classification:\n",
     "                ddsetc = ofile.create_dataset(\n",
     "                    h5path+\"/pixels_classified\",\n",
     "                    oshape,\n",
@@ -518,10 +568,6 @@
     "                    chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
     "                    dtype=np.int32, compression=\"gzip\")\n",
     "\n",
-    "                # row common mode correction.\n",
-    "                data = cmCorrection.correct(data)\n",
-    "                histCalCMCor.fill(data)\n",
-    "                ddsetcm[...] = np.moveaxis(data, 2, 0)\n",
     "\n",
     "                data, patterns = patternClassifier.classify(data)\n",
     "\n",
@@ -565,6 +611,7 @@
     "        'label': 'CM corr.'\n",
     "    })\n",
     "\n",
+    "if pattern_classification:\n",
     "    ho, eo, co, so = histCalSECor.get()\n",
     "    d.append({\n",
     "        'x': co,\n",
@@ -576,7 +623,6 @@
     "        'label': 'Single split events'\n",
     "    })\n",
     "\n",
-    "\n",
     "fig = xana.simplePlot(\n",
     "    d, aspect=1, x_label=f'Energy({plot_unit})',\n",
     "    y_label='Number of occurrences', figsize='2col',\n",