Skip to content
Snippets Groups Projects

[EPIX100][CORRECT] Fix/epix output fully corrected data

Merged Nuno Duarte requested to merge fix/epix_output_fully_corrected_data into master
All threads resolved!
1 file
+ 46
22
Compare changes
  • Side-by-side
  • Inline
@@ -8,7 +8,14 @@
"\n",
"Author: European XFEL Detector Group, Version: 2.0\n",
"\n",
"The following notebook provides data correction of images acquired with the ePix100 detector."
"The following notebook provides data correction of images acquired with the ePix100 detector. \n",
"\n",
"The sequence of corrections applied is:\n",
"Offset --> Common Mode Noise --> Relative Gain --> Charge Sharing --> Absolute Gain.\n",
"\n",
"Offset, common mode and gain corrected data is saved to /data/image/pixels in the CORR files.\n",
"\n",
"If pattern classification is applied (charge sharing correction), this data will be saved to /data/image/pixels_classified, while the corresponding patterns will be saved to /data/image/patterns in the CORR files."
]
},
{
@@ -17,15 +24,15 @@
"metadata": {},
"outputs": [],
"source": [
"in_folder = \"/gpfs/exfel/exp/CALLAB/202031/p900113/raw\" # input folder, required\n",
"out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/remove/epix_correct\" # output folder, required\n",
"in_folder = \"/gpfs/exfel/exp/HED/202202/p003121/raw\" # input folder, required\n",
"out_folder = \"\" # output folder, required\n",
"metadata_folder = \"\" # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
"sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
"sequences_per_node = 1 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n",
"run = 9988 # which run to read data from, required\n",
"run = 156 # which run to read data from, required\n",
"\n",
"# Parameters for accessing the raw data.\n",
"karabo_id = \"MID_EXP_EPIX-1\" # karabo karabo_id\n",
"karabo_id = \"HED_IA1_EPX100-1\" # karabo karabo_id\n",
"karabo_da = \"EPIX01\" # data aggregators\n",
"db_module = \"\" # module id in the database\n",
"receiver_template = \"RECEIVER\" # detector receiver template for accessing raw data files\n",
@@ -48,7 +55,7 @@
"in_vacuum = False # detector operated in vacuum\n",
"integration_time = -1 # Detector integration time, Default value -1 to use the value from the slow data.\n",
"fix_temperature = -1 # fixed temperature value in Kelvin, Default value -1 to use the value from files.\n",
"gain_photon_energy = 9.0 # Photon energy used for gain calibration\n",
"gain_photon_energy = 8.048 # Photon energy used for gain calibration\n",
"photon_energy = 0. # Photon energy to calibrate in number of photons, 0 for calibration in keV\n",
"\n",
"# Flags to select type of applied corrections.\n",
@@ -360,7 +367,6 @@
" blockSize=blockSize\n",
")\n",
"\n",
"\n",
"# *****************Histogram Calculators****************** #\n",
"histCalCor = xcal.HistogramCalculator(\n",
" sensorSize,\n",
@@ -456,7 +462,7 @@
" nCells=memoryCells,\n",
" blockSize=blockSize\n",
" )\n",
" \n",
"\n",
" if absolute_gain:\n",
" histCalAbsGainCor = xcal.HistogramCalculator(\n",
" sensorSize,\n",
@@ -487,7 +493,7 @@
" blockSize=[x, y],\n",
" parallel=run_parallel,\n",
" )\n",
" histCalSECor = xcal.HistogramCalculator(\n",
" histCalCSCor = xcal.HistogramCalculator(\n",
" sensorSize,\n",
" bins=nbins,\n",
" range=hrange,\n",
@@ -495,6 +501,14 @@
" nCells=memoryCells,\n",
" blockSize=blockSize,\n",
" )\n",
" histCalGainCorClusters = xcal.HistogramCalculator(\n",
" sensorSize,\n",
" bins=nbins,\n",
" range=hrange*hscale,\n",
" parallel=run_parallel,\n",
" nCells=memoryCells,\n",
" blockSize=blockSize\n",
" )\n",
" histCalGainCorSingles = xcal.HistogramCalculator(\n",
" sensorSize,\n",
" bins=nbins,\n",
@@ -543,8 +557,6 @@
" d = gainCorrection.correct(d)\n",
" histCalRelGainCor.fill(d)\n",
"\n",
" data[index, ...] = np.squeeze(d)\n",
"\n",
" \"\"\"The gain correction is currently applying\n",
" an absolute correction (not a relative correction\n",
" as the implied by the name);\n",
@@ -561,16 +573,12 @@
" if pattern_classification:\n",
"\n",
" d_clu, patterns = patternClassifier.classify(d)\n",
"\n",
" d_clu[d_clu < (split_evt_primary_threshold*const_data[\"Noise\"])] = 0\n",
"\n",
" data_patterns[index, ...] = np.squeeze(patterns)\n",
"\n",
" \n",
" data_clu[index, ...] = np.squeeze(d_clu)\n",
" data_patterns[index, ...] = np.squeeze(patterns)\n",
"\n",
" d_clu[patterns != 100] = np.nan\n",
"\n",
" histCalSECor.fill(d_clu)\n",
" histCalCSCor.fill(d_clu)\n",
"\n",
" # absolute gain correction\n",
" # changes data from ADU to keV (or n. of photons)\n",
@@ -584,13 +592,19 @@
" if pattern_classification:\n",
" # Modify pattern classification.\n",
" d_clu = d_clu * gain_cnst\n",
" \n",
" if photon_energy > 0:\n",
" d_clu /= photon_energy\n",
"\n",
" histCalGainCorSingles.fill(d_clu)\n",
"\n",
" data_clu[index, ...] = np.squeeze(d_clu)\n",
"\n",
" histCalGainCorClusters.fill(d_clu)\n",
" \n",
"                    d_sing = d_clu[patterns==100] # pattern 100 corresponds to single-photon events\n",
" if len(d_sing):\n",
" histCalGainCorSingles.fill(d_sing)\n",
"\n",
" data[index, ...] = np.squeeze(d)\n",
" histCalCor.fill(d)"
]
},
@@ -609,7 +623,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"for f in seq_files:\n",
@@ -668,14 +684,14 @@
" dtype=np.float32)\n",
"\n",
" if pattern_classification:\n",
" # Save /data/image//pixels_classified in corrected file.\n",
" # Save /data/image/pixels_classified in corrected file.\n",
" datasetc = ofile.create_dataset(\n",
" f\"{data_path}/pixels_classified\",\n",
" data=data_clu,\n",
" chunks=dataset_chunk,\n",
" dtype=np.float32)\n",
"\n",
" # Save /data/image//patterns in corrected file.\n",
" # Save /data/image/patterns in corrected file.\n",
" datasetp = ofile.create_dataset(\n",
" f\"{data_path}/patterns\",\n",
" data=data_patterns,\n",
@@ -739,9 +755,8 @@
" 'label': 'Relative gain corr.'\n",
" })\n",
"\n",
"\n",
"if pattern_classification:\n",
" ho, eo, co, so = histCalSECor.get()\n",
" ho, eo, co, so = histCalCSCor.get()\n",
" d.append({\n",
" 'x': co,\n",
" 'y': ho,\n",
@@ -749,7 +764,7 @@
" 'drawstyle': 'steps-mid',\n",
" 'errorstyle': 'bars',\n",
" 'errorcoarsing': 2,\n",
" 'label': 'Isolated photons (singles)'\n",
" 'label': 'Charge sharing corr.'\n",
" })\n",
"\n",
"fig = xana.simplePlot(\n",
@@ -782,6 +797,17 @@
" })\n",
"\n",
" if pattern_classification:\n",
" ho, eo, co, so = histCalGainCorClusters.get()\n",
" d.append({\n",
" 'x': co,\n",
" 'y': ho,\n",
" 'y_err': np.sqrt(ho[:]),\n",
" 'drawstyle': 'steps-mid',\n",
" 'errorstyle': 'bars',\n",
" 'errorcoarsing': 2,\n",
" 'label': 'Charge sharing corr.'\n",
" })\n",
" \n",
" ho, eo, co, so = histCalGainCorSingles.get()\n",
" d.append({\n",
" 'x': co,\n",
@@ -792,7 +818,7 @@
" 'errorcoarsing': 2,\n",
" 'label': 'Isolated photons (singles)'\n",
" })\n",
"\n",
" \n",
" fig = xana.simplePlot(\n",
" d, aspect=1, x_label=f'Energy ({plot_unit})',\n",
" y_label='Number of occurrences', figsize='2col',\n",
@@ -855,9 +881,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "cal_venv",
"language": "python",
"name": "python3"
"name": "cal_venv"
},
"language_info": {
"codemirror_mode": {
Loading