diff --git a/notebooks/ePix100/Correction_ePix100_NBC.ipynb b/notebooks/ePix100/Correction_ePix100_NBC.ipynb index 878edb18e2346902e08fe6d2d56be7176afff197..34fdbf8aa78773bfd9fb5dd33bf1747411c532cd 100644 --- a/notebooks/ePix100/Correction_ePix100_NBC.ipynb +++ b/notebooks/ePix100/Correction_ePix100_NBC.ipynb @@ -6,7 +6,7 @@ "source": [ "# ePIX Data Correction ##\n", "\n", - "Authors: Q. Tian S. Hauf, Version 1.0\n", + "Authors: Q. Tian S. Hauf M. Cascella, Version 1.0\n", "\n", "The following notebook provides Offset correction of images acquired with the ePix100 detector." ] @@ -49,6 +49,9 @@ "photon_energy = 8.0 # Photon energy to calibrate in number of photons, 0 for calibration in keV\n", "\n", "relative_gain = False # Apply relative gain correction.\n", + "common_mode = True # Apply common mode correction.\n", + "cm_min_frac = 0.25 # No CM correction is performed if after masking the ratio of good pixels falls below this \n", + "cm_noise_sigma = 5. # CM correction noise standard deviation\n", "\n", "split_evt_primary_threshold = 7. # primary threshold for split event correction\n", "split_evt_secondary_threshold = 5. 
# secondary threshold for split event correction\n", @@ -105,8 +108,8 @@ "metadata": {}, "outputs": [], "source": [ - "# TODO: expose to first cell after fixing common mode correction.\n", - "common_mode = False # Apply common mode correction.\n", + "# TODO: expose to first cell after fixing clustering.\n", + "pattern_classification = False # do clustering.\n", "\n", "h5path = h5path.format(karabo_id, receiver_id)\n", "h5path_t = h5path_t.format(karabo_id, receiver_id)\n", @@ -360,18 +363,72 @@ "# ************************Calculators************************ #\n", "if common_mode:\n", " commonModeBlockSize = [x//2, y//2]\n", - " commonModeAxisR = 'row'\n", - " cmCorrection = xcal.CommonModeCorrection(\n", + "\n", + " cmCorrectionB = xcal.CommonModeCorrection(sensorSize, \n", + " commonModeBlockSize, \n", + " 'block',\n", + " nCells = memoryCells, \n", + " noiseMap = const_data['Noise'],\n", + " runParallel=run_parallel,\n", + " stats=True,\n", + " minFrac = cm_min_frac,\n", + " noiseSigma = cm_noise_sigma,\n", + " )\n", + " cmCorrectionR = xcal.CommonModeCorrection(sensorSize, \n", + " commonModeBlockSize, \n", + " 'row',\n", + " nCells = memoryCells, \n", + " noiseMap = const_data['Noise'],\n", + " runParallel=run_parallel,\n", + " stats=True,\n", + " minFrac = cm_min_frac,\n", + " noiseSigma = cm_noise_sigma,\n", + " )\n", + " cmCorrectionC = xcal.CommonModeCorrection(sensorSize, \n", + " commonModeBlockSize, \n", + " 'col',\n", + " nCells = memoryCells, \n", + " noiseMap = const_data['Noise'],\n", + " runParallel=run_parallel,\n", + " stats=True,\n", + " minFrac = cm_min_frac,\n", + " noiseSigma = cm_noise_sigma,\n", + " )\n", + "\n", + " histCalCMCor = xcal.HistogramCalculator(\n", + " sensorSize,\n", + " bins=1050,\n", + " range=[-50, 1000],\n", + " parallel=run_parallel,\n", + " nCells=memoryCells,\n", + " cores=cpuCores,\n", + " blockSize=blockSize,\n", + " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], 
"source": [ + "\n", + "if pattern_classification:\n", + " patternClassifier = xcal.PatternClassifier(\n", " [x, y],\n", - " commonModeBlockSize,\n", - " commonModeAxisR,\n", + " const_data[\"Noise\"],\n", + " split_evt_primary_threshold,\n", + " split_evt_secondary_threshold,\n", + " split_evt_mip_threshold,\n", + " tagFirstSingles=0,\n", " nCells=memoryCells,\n", - " noiseMap=const_data[\"Noise\"],\n", + " cores=cpuCores,\n", + " allowElongated=False,\n", + " blockSize=[x, y],\n", " runParallel=run_parallel,\n", - " stats=True,\n", " )\n", "\n", - " histCalCMCor = xcal.HistogramCalculator(\n", + " histCalSECor = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=1050,\n", " range=[-50, 1000],\n", @@ -379,31 +436,7 @@ " nCells=memoryCells,\n", " cores=cpuCores,\n", " blockSize=blockSize,\n", - " )\n", - "\n", - "patternClassifier = xcal.PatternClassifier(\n", - " [x, y],\n", - " const_data[\"Noise\"],\n", - " split_evt_primary_threshold,\n", - " split_evt_secondary_threshold,\n", - " split_evt_mip_threshold,\n", - " tagFirstSingles=0,\n", - " nCells=memoryCells,\n", - " cores=cpuCores,\n", - " allowElongated=False,\n", - " blockSize=[x, y],\n", - " runParallel=run_parallel,\n", - ")\n", - "\n", - "histCalSECor = xcal.HistogramCalculator(\n", - " sensorSize,\n", - " bins=1050,\n", - " range=[-50, 1000],\n", - " parallel=run_parallel,\n", - " nCells=memoryCells,\n", - " cores=cpuCores,\n", - " blockSize=blockSize,\n", - ")" + " )" ] }, { @@ -413,10 +446,14 @@ "outputs": [], "source": [ "if common_mode:\n", - " cmCorrection.debug()\n", + " cmCorrectionB.debug()\n", + " cmCorrectionR.debug()\n", + " cmCorrectionC.debug()\n", " histCalCMCor.debug()\n", - "patternClassifier.debug()\n", - "histCalSECor.debug()" + "\n", + "if pattern_classification:\n", + " patternClassifier.debug()\n", + " histCalSECor.debug()" ] }, { @@ -477,6 +514,21 @@ " # Offset correction.\n", " data = offsetCorrection.correct(data.astype(np.float32))\n", "\n", + " # Common Mode correction.\n", + 
" if common_mode:\n", + "# ddsetcm = ofile.create_dataset(\n", + "# h5path+\"/pixels_cm\",\n", + "# oshape,\n", + "# chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", + "# dtype=np.float32)\n", + "\n", + " data = cmCorrectionB.correct(data.astype(np.float32)) #correct for the Block common mode\n", + " data = cmCorrectionR.correct(data.astype(np.float32)) #correct for the Row common mode\n", + " data = cmCorrectionC.correct(data.astype(np.float32)) #correct for the Col common mode\n", + "\n", + " histCalCMCor.fill(data)\n", + "# ddsetcm[...] = np.moveaxis(data, 2, 0)\n", + "\n", " # relative gain correction.\n", " if relative_gain:\n", " data = gainCorrection.correct(data.astype(np.float32))\n", @@ -492,20 +544,14 @@ " \"\"\"The gain correction is currently applying an absolute correction\n", " (not a relative correction as the implied by the name);\n", " it changes the scale (the unit of measurement) of the data from ADU\n", - " to either keV or n_of_photons. But the common mode correction\n", + " to either keV or n_of_photons. 
But the pattern classification\n", " relies on comparing data with the noise map, which is still in ADU.\n", "\n", " The best solution is probably to do a relative gain correction first\n", - " (correct) and apply the global absolute gain to the data at the end,\n", - " after common mode and clustering.\n", + " and apply the global absolute gain to the data at the end,\n", + " after clustering.\n", " \"\"\"\n", - " if common_mode:\n", - " ddsetcm = ofile.create_dataset(\n", - " h5path+\"/pixels_cm\",\n", - " oshape,\n", - " chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", - " dtype=np.float32)\n", - "\n", + " if pattern_classification:\n", " ddsetc = ofile.create_dataset(\n", " h5path+\"/pixels_classified\",\n", " oshape,\n", @@ -518,10 +564,6 @@ " chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", " dtype=np.int32, compression=\"gzip\")\n", "\n", - " # row common mode correction.\n", - " data = cmCorrection.correct(data)\n", - " histCalCMCor.fill(data)\n", - " ddsetcm[...] = np.moveaxis(data, 2, 0)\n", "\n", " data, patterns = patternClassifier.classify(data)\n", "\n", @@ -565,6 +607,7 @@ " 'label': 'CM corr.'\n", " })\n", "\n", + "if pattern_classification:\n", " ho, eo, co, so = histCalSECor.get()\n", " d.append({\n", " 'x': co,\n",