{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ePix100 Data Correction\n", "\n", "Author: European XFEL Detector Group, Version: 2.0\n", "\n", "The following notebook provides data correction of images acquired with the ePix100 detector. \n", "\n", "The sequence of correction applied are:\n", "Offset --> Common Mode Noise --> Relative Gain --> Charge Sharing --> Absolute Gain.\n", "\n", "Offset, common mode and gain corrected data is saved to /data/image/pixels in the CORR files.\n", "\n", "If pattern classification is applied (charge sharing correction), this data will be saved to /data/image/pixels_classified, while the corresponding patterns will be saved to /data/image/patterns in the CORR files." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "in_folder = \"/gpfs/exfel/exp/HED/202202/p003121/raw\" # input folder, required\n", "out_folder = \"\" # output folder, required\n", "metadata_folder = \"\" # Directory containing calibration_metadata.yml when run by xfel-calibrate\n", "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n", "sequences_per_node = 1 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n", "run = 156 # which run to read data from, required\n", "\n", "# Parameters for accessing the raw data.\n", "karabo_id = \"HED_IA1_EPX100-1\" # karabo karabo_id\n", "karabo_da = \"EPIX01\" # data aggregators\n", "db_module = \"\" # module id in the database\n", "receiver_template = \"RECEIVER\" # detector receiver template for accessing raw data files\n", "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data\n", "instrument_source_template = '{}/DET/{}:daqOutput' # instrument detector data source in h5files\n", "\n", "# Parameters affecting writing corrected data.\n", "chunk_size_idim = 1 # H5 chunking size of output data\n", "limit_trains = 0 # Process only first N images, 0 - process all.\n", "\n", "# Parameters for the calibration database.\n", "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # calibration DB interface to use\n", "cal_db_timeout = 300000 # timeout on caldb requests\n", "creation_time = \"\" # The timestamp to use with Calibration DBe. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n", "\n", "# Conditions for retrieving calibration constants.\n", "bias_voltage = 200 # bias voltage\n", "in_vacuum = False # detector operated in vacuum\n", "integration_time = -1 # Detector integration time, Default value -1 to use the value from the slow data.\n", "fix_temperature = -1 # fixed temperature value in Kelvin, Default value -1 to use the value from files.\n", "gain_photon_energy = 8.048 # Photon energy used for gain calibration\n", "photon_energy = 0. # Photon energy to calibrate in number of photons, 0 for calibration in keV\n", "\n", "# Flags to select type of applied corrections.\n", "pattern_classification = True # do clustering.\n", "relative_gain = True # Apply relative gain correction.\n", "absolute_gain = True # Apply absolute gain correction (implies relative gain).\n", "common_mode = True # Apply common mode correction.\n", "\n", "# Parameters affecting applied correction.\n", "cm_min_frac = 0.25 # No CM correction is performed if after masking the ratio of good pixels falls below this \n", "cm_noise_sigma = 5. # CM correction noise standard deviation\n", "split_evt_primary_threshold = 7. 
# primary threshold for split event correction\n", "split_evt_secondary_threshold = 5. # secondary threshold for split event correction\n", "split_evt_mip_threshold = 1000. # minimum ionizing particle threshold\n", "\n", "\n", "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n", " from xfel_calibrate.calibrate import balance_sequences as bs\n", " return bs(in_folder, run, sequences, sequences_per_node, karabo_da)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import tabulate\n", "import warnings\n", "from logging import warning\n", "from sys import exit\n", "\n", "import h5py\n", "import pasha as psh\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "from IPython.display import Latex, display\n", "from extra_data import RunDirectory, H5File\n", "from pathlib import Path\n", "\n", "import cal_tools.restful_config as rest_cfg\n", "from XFELDetAna import xfelpyanatools as xana\n", "from XFELDetAna import xfelpycaltools as xcal\n", "from cal_tools.calcat_interface import EPIX100_CalibrationData\n", "from cal_tools.epix100 import epix100lib\n", "from cal_tools.files import DataFile\n", "from cal_tools.tools import (\n", " calcat_creation_time,\n", " CalibrationMetadata,\n", ")\n", "from cal_tools.step_timing import StepTimer\n", "\n", "warnings.filterwarnings('ignore')\n", "\n", "prettyPlotting = True\n", "\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "x = 708 # rows of the ePix100\n", "y = 768 # columns of the ePix100\n", "\n", "if absolute_gain:\n", " relative_gain = True\n", "\n", "plot_unit = 'ADU'" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "in_folder = Path(in_folder)\n", "out_folder = Path(out_folder)\n", "\n", "out_folder.mkdir(parents=True, exist_ok=True)\n", "\n", "run_folder = in_folder / f\"r{run:04d}\"\n", "\n", "instrument_src = instrument_source_template.format(\n", " karabo_id, receiver_template)\n", "\n", "print(f\"Correcting run: {run_folder}\")\n", "print(f\"Instrument H5File source: {instrument_src}\")\n", "print(f\"Corrected data files are stored at: {out_folder}\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "creation_time = calcat_creation_time(in_folder, run, creation_time)\n", "print(f\"Using {creation_time.isoformat()} as creation time\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "run_dc = RunDirectory(run_folder, _use_voview=False)\n", "\n", "seq_files = [Path(f.filename) for f in run_dc.select(f\"*{karabo_id}*\").files]\n", "\n", "# If a subset of sequences was requested,\n", "# adapt the seq_files list.\n", "if sequences != [-1]:\n", " seq_files = [f for f in seq_files if any(f.match(f\"*-S{s:05d}.h5\") for s in sequences)]\n", "\n", "if not len(seq_files):\n", " raise IndexError(\"No sequence files available for the selected sequences.\")\n", "\n", "print(f\"Processing a total of {len(seq_files)} sequence files\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "step_timer = StepTimer()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "step_timer.start()\n", "\n", "sensorSize = [x, y]\n", "# Sensor area will be analysed according to blockSize\n", "blockSize = [sensorSize[0]//2, 
sensorSize[1]//2]\n", "xcal.defaultBlockSize = blockSize\n", "memoryCells = 1 # ePIX has no memory cells\n", "run_parallel = False\n", "\n", "# Read control data.\n", "ctrl_data = epix100lib.epix100Ctrl(\n", " run_dc=run_dc,\n", " instrument_src=instrument_src,\n", " ctrl_src=f\"{karabo_id}/DET/CONTROL\",\n", " )\n", "\n", "if integration_time < 0:\n", " integration_time = ctrl_data.get_integration_time()\n", " integration_time_str_add = \"\"\n", "else:\n", " integration_time_str_add = \"(manual input)\"\n", "\n", "if fix_temperature < 0:\n", " temperature = ctrl_data.get_temprature()\n", " temperature_k = temperature + 273.15\n", " temp_str_add = \"\"\n", "else:\n", " temperature_k = fix_temperature\n", " temperature = fix_temperature - 273.15\n", " temp_str_add = \"(manual input)\"\n", "\n", "print(f\"Bias voltage is {bias_voltage} V\")\n", "print(f\"Detector integration time is set to {integration_time} \\u03BCs {integration_time_str_add}\")\n", "print(f\"Mean temperature: {temperature:0.2f}°C / {temperature_k:0.2f} K {temp_str_add}\")\n", "print(f\"Operated in vacuum: {in_vacuum}\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Table of sequence files to process\n", "table = [(k, f) for k, f in enumerate(seq_files)]\n", "\n", "if len(table):\n", " md = display(Latex(tabulate.tabulate(\n", " table,\n", " tablefmt='latex',\n", " headers=[\"#\", \"file\"]\n", " )))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Retrieving calibration constants\n", "\n", "As a first step, dark maps have to be loaded." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "constant_names = [\"OffsetEPix100\", \"NoiseEPix100\"]\n", "if relative_gain:\n", " constant_names += [\"RelativeGainEPix100\"]\n", "\n", "epix_cal = EPIX100_CalibrationData(\n", " detector_name=karabo_id,\n", " sensor_bias_voltage=bias_voltage,\n", " integration_time=integration_time,\n", " sensor_temperature=temperature_k,\n", " in_vacuum=in_vacuum,\n", " source_energy=gain_photon_energy,\n", " event_at=creation_time,\n", " client=rest_cfg.calibration_client(),\n", ")\n", "const_metadata = epix_cal.metadata(calibrations=constant_names)\n", "\n", "# Load the constant data from files\n", "const_data = epix_cal.ndarray_map(metadata=const_metadata)[karabo_da]\n", "\n", "# Validate the constants availability and raise/warn correspondingly. 
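\n", "# Darks (offset and noise) are mandatory for any correction, while a missing\n", "# gain map only disables the gain corrections further down instead of aborting.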
\n", "missing_dark_constants = {\"OffsetEPix100\", \"NoiseEPix100\"} - set(const_data)\n", "if missing_dark_constants:\n", " raise ValueError(\n", " f\"Dark constants {missing_dark_constants} are not available to correct {karabo_da}.\"\n", " \"No correction is performed!\")\n", "\n", "if relative_gain and \"RelativeGainEPix100\" not in const_data.keys():\n", " warning(\"RelativeGainEPix100 is not found in the calibration database.\")\n", " relative_gain = False\n", " absolute_gain = False" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Record constant details in YAML metadata\n", "epix_metadata = const_metadata[karabo_da]\n", "CalibrationMetadata(metadata_folder or out_folder).add_fragment({\n", " \"retrieved-constants\": {\n", " karabo_da: {\n", " \"constants\": {\n", " cname: {\n", " \"path\": str(epix_cal.caldb_root / ccv_metadata[\"path\"]),\n", " \"dataset\": ccv_metadata[\"dataset\"],\n", " \"creation-time\": ccv_metadata[\"begin_validity_at\"],\n", " \"ccv_id\": ccv_metadata[\"ccv_id\"],\n", " } for cname, ccv_metadata in epix_metadata.items()\n", " },\n", " \"physical-name\": list(epix_metadata.values())[0][\"physical_name\"],\n", " }\n", " }\n", "})" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Initializing some parameters.\n", "hscale = 1\n", "stats = True\n", "hrange = np.array([-50, 1000])\n", "nbins = hrange[1] - hrange[0]\n", "commonModeBlockSize = [x//2, y//2]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "histCalOffsetCor = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=nbins,\n", " range=hrange,\n", " parallel=run_parallel,\n", " nCells=memoryCells,\n", " blockSize=blockSize\n", ")\n", "\n", "# *****************Histogram Calculators****************** #\n", "histCalCor = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=1050,\n", " range=[-50, 1000],\n", " parallel=run_parallel,\n", " nCells=memoryCells,\n", " blockSize=blockSize\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if common_mode:\n", " histCalCMCor = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=nbins,\n", " range=hrange,\n", " parallel=run_parallel,\n", " nCells=memoryCells,\n", " blockSize=blockSize,\n", " )\n", " cmCorrectionB = xcal.CommonModeCorrection(\n", " shape=sensorSize,\n", " blockSize=commonModeBlockSize, \n", " orientation='block',\n", " nCells=memoryCells, \n", " noiseMap=const_data['NoiseEPix100'],\n", " runParallel=run_parallel,\n", " parallel=run_parallel,\n", " stats=stats,\n", " minFrac=cm_min_frac,\n", " noiseSigma=cm_noise_sigma,\n", " )\n", " cmCorrectionR = xcal.CommonModeCorrection(\n", " shape=sensorSize, \n", " blockSize=commonModeBlockSize, \n", " orientation='row',\n", " nCells=memoryCells, \n", " noiseMap=const_data['NoiseEPix100'],\n", " runParallel=run_parallel,\n", " parallel=run_parallel,\n", " stats=stats,\n", " minFrac=cm_min_frac,\n", " noiseSigma=cm_noise_sigma,\n", " )\n", " cmCorrectionC = xcal.CommonModeCorrection(\n", " shape=sensorSize, \n", " blockSize=commonModeBlockSize, \n", " orientation='col',\n", " nCells=memoryCells, \n", " noiseMap=const_data['NoiseEPix100'],\n", " runParallel=run_parallel,\n", " parallel=run_parallel,\n", " stats=stats,\n", " minFrac=cm_min_frac,\n", " noiseSigma=cm_noise_sigma,\n", " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if relative_gain:\n", " 
gain_cnst = np.median(const_data[\"RelativeGainEPix100\"])\n", " hscale = gain_cnst\n", " plot_unit = 'keV'\n", " if photon_energy > 0:\n", " plot_unit = '$\gamma$'\n", " hscale /= photon_energy\n", " \n", " gainCorrection = xcal.RelativeGainCorrection(\n", " sensorSize,\n", " gain_cnst/const_data[\"RelativeGainEPix100\"][..., None],\n", " nCells=memoryCells,\n", " parallel=run_parallel,\n", " blockSize=blockSize,\n", " gains=None,\n", " )\n", "\n", " histCalRelGainCor = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=nbins,\n", " range=hrange,\n", " parallel=run_parallel,\n", " nCells=memoryCells,\n", " blockSize=blockSize\n", " )\n", "\n", " if absolute_gain:\n", " histCalAbsGainCor = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=nbins,\n", " range=hrange*hscale,\n", " parallel=run_parallel,\n", " nCells=memoryCells,\n", " blockSize=blockSize\n", " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if pattern_classification:\n", " patternClassifier = xcal.PatternClassifier(\n", " [x, y],\n", " const_data[\"NoiseEPix100\"],\n", " split_evt_primary_threshold,\n", " split_evt_secondary_threshold,\n", " split_evt_mip_threshold,\n", " tagFirstSingles=0,\n", " nCells=memoryCells,\n", " allowElongated=False,\n", " blockSize=[x, y],\n", " parallel=run_parallel,\n", " )\n", " histCalCSCor = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=nbins,\n", " range=hrange,\n", " parallel=run_parallel,\n", " nCells=memoryCells,\n", " blockSize=blockSize,\n", " )\n", " histCalGainCorClusters = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=nbins,\n", " range=hrange*hscale,\n", " parallel=run_parallel,\n", " nCells=memoryCells,\n", " blockSize=blockSize\n", " )\n", " histCalGainCorSingles = xcal.HistogramCalculator(\n", " sensorSize,\n", " bins=nbins,\n", " range=hrange*hscale,\n", " parallel=run_parallel,\n", " nCells=memoryCells,\n", " blockSize=blockSize\n", " )" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Applying corrections" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def correct_train(wid, index, tid, d):\n", "\n", " d = d[..., np.newaxis].astype(np.float32)\n", " d = np.compress(\n", " np.any(d > 0, axis=(0, 1)), d, axis=2)\n", " \n", " # Offset correction.\n", " d -= const_data[\"OffsetEPix100\"]\n", "\n", " histCalOffsetCor.fill(d)\n", " # Common Mode correction.\n", " if common_mode:\n", " # Block CM\n", " d = cmCorrectionB.correct(d)\n", " # Row CM\n", " d = cmCorrectionR.correct(d)\n", " # Column CM\n", " d = cmCorrectionC.correct(d)\n", " histCalCMCor.fill(d)\n", "\n", " # Relative gain correction.\n", " if relative_gain:\n", " d = gainCorrection.correct(d)\n", " histCalRelGainCor.fill(d)\n", "\n", " \"\"\"The gain correction currently applies\n", " an absolute correction (not a relative correction,\n", " as implied by the name);\n", " it changes the scale (the unit of measurement)\n", " of the data from ADU to either keV or number of photons.\n", " But the pattern classification relies on comparing\n", " data with the NoiseEPix100 map, which is still in ADU.\n", "\n", " The best solution is to do a relative gain\n", " correction first and apply the global absolute\n", " gain to the data at the end, after clustering.\n", " \"\"\"\n", "\n", " if pattern_classification:\n", "\n", " d_clu, patterns = patternClassifier.classify(d)\n", " d_clu[d_clu < (split_evt_primary_threshold*const_data[\"NoiseEPix100\"])] = 0\n", " \n", " 
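# Store classified frames and patterns for later writing to the CORR file.\n", " 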
data_clu[index, ...] = np.squeeze(d_clu)\n", " data_patterns[index, ...] = np.squeeze(patterns)\n", "\n", " histCalCSCor.fill(d_clu)\n", "\n", " # Absolute gain correction:\n", " # changes data from ADU to keV (or number of photons).\n", " if absolute_gain:\n", "\n", " d = d * gain_cnst\n", " if photon_energy > 0:\n", " d /= photon_energy\n", " histCalAbsGainCor.fill(d)\n", "\n", " if pattern_classification:\n", " # Apply the same absolute-gain scaling to the classified data.\n", " d_clu = d_clu * gain_cnst\n", " \n", " if photon_energy > 0:\n", " d_clu /= photon_energy\n", "\n", " data_clu[index, ...] = np.squeeze(d_clu)\n", "\n", " histCalGainCorClusters.fill(d_clu)\n", " \n", " d_sing = d_clu[patterns == 100] # pattern 100 corresponds to single-photon events\n", " if len(d_sing):\n", " histCalGainCorSingles.fill(d_sing)\n", "\n", " data[index, ...] = np.squeeze(d)\n", " histCalCor.fill(d)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 10 is a number chosen after testing 1 ... 71 parallel threads\n", "context = psh.context.ThreadContext(num_workers=10)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "empty_seq = 0\n", "\n", "for f in seq_files:\n", "\n", " seq_dc = H5File(f)\n", " # Save corrected data in an output file with the name\n", " # of the corresponding raw sequence file.\n", " out_file = out_folder / f.name.replace(\"RAW\", \"CORR\")\n", "\n", " # Data shape in seq_dc excluding trains with empty images. \n", " ishape = seq_dc[instrument_src, \"data.image.pixels\"].shape\n", " corr_ntrains = ishape[0]\n", " all_train_ids = seq_dc.train_ids\n", "\n", " # Raise a WARNING if this sequence has no trains to correct.\n", " # Otherwise, print the number of trains with no data.\n", " if corr_ntrains == 0:\n", " warning(f\"No trains to correct for {f.name}: \"\n", " \"Skipping the processing of this file.\")\n", " empty_seq += 1\n", " continue\n", " elif len(all_train_ids) != corr_ntrains:\n", " print(f\"{f.name} has {len(all_train_ids) - corr_ntrains} trains with missing data.\")\n", "\n", " # This parameter is only used for testing.\n", " if limit_trains > 0:\n", " print(f\"\\nCorrected trains are limited to: {limit_trains} trains\")\n", " corr_ntrains = min(corr_ntrains, limit_trains)\n", " oshape = (corr_ntrains, *ishape[1:])\n", "\n", " data = context.alloc(shape=oshape, dtype=np.float32)\n", "\n", " if pattern_classification:\n", " data_clu = context.alloc(shape=oshape, dtype=np.float32)\n", " data_patterns = context.alloc(shape=oshape, dtype=np.int32)\n", "\n", " step_timer.start() # Correct data. \n", "\n", " # Overwrite seq_dc after eliminating empty trains and/or applying the train limit.\n", " seq_dc = seq_dc.select(\n", " instrument_src, \"*\", require_all=True).select_trains(np.s_[:corr_ntrains])\n", "\n", " pixel_data = seq_dc[instrument_src, \"data.image.pixels\"]\n", " context.map(correct_train, pixel_data)\n", "\n", " step_timer.done_step(f'Correcting {corr_ntrains} trains.')\n", "\n", " step_timer.start() # Write corrected data.\n", "\n", " # Create CORR files and add corrected data sections.\n", " image_counts = seq_dc[instrument_src, \"data.image.pixels\"].data_counts(labelled=False)\n", "\n", " # Write corrected data.\n", " with DataFile(out_file, \"w\") as ofile:\n", " dataset_chunk = ((chunk_size_idim,) + oshape[1:]) # e.g. 
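one train per chunk: 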
(1, pixels_x, pixels_y) \n", "\n", " # Create INDEX datasets.\n", " ofile.create_index(seq_dc.train_ids, from_file=seq_dc.files[0])\n", " # Create METADATA datasets\n", " ofile.create_metadata(\n", " like=seq_dc,\n", " sequence=seq_dc.run_metadata()[\"sequenceNumber\"],\n", " instrument_channels=(f'{instrument_src}/data',)\n", " )\n", " # Create Instrument section to later add corrected datasets.\n", " outp_source = ofile.create_instrument_source(instrument_src)\n", "\n", " # Create count/first datasets at INDEX source.\n", " outp_source.create_index(data=image_counts)\n", "\n", " # Store uncorrected RAW image datasets for the corrected trains.\n", "\n", " data_raw_fields = [ # /data/\n", " \"ambTemp\", \"analogCurr\", \"analogInputVolt\", \"backTemp\",\n", " \"digitalInputVolt\", \"guardCurr\", \"relHumidity\", \"digitalCurr\"\n", " ]\n", " for field in data_raw_fields:\n", " field_arr = seq_dc[instrument_src, f\"data.{field}\"].ndarray()\n", "\n", " outp_source.create_key(\n", " f\"data.{field}\", data=field_arr,\n", " chunks=(chunk_size_idim, *field_arr.shape[1:]))\n", "\n", " image_raw_fields = [ # /data/image/\n", " \"binning\", \"bitsPerPixel\", \"dimTypes\", \"dims\",\n", " \"encoding\", \"flipX\", \"flipY\", \"roiOffsets\", \"rotation\",\n", " ]\n", " for field in image_raw_fields:\n", " field_arr = seq_dc[instrument_src, f\"data.image.{field}\"].ndarray()\n", "\n", " outp_source.create_key(\n", " f\"data.image.{field}\", data=field_arr,\n", " chunks=(chunk_size_idim, *field_arr.shape[1:]))\n", "\n", " # Add main corrected `data.image.pixels` dataset and store corrected data.\n", " outp_source.create_key(\n", " \"data.image.pixels\", data=data, chunks=dataset_chunk)\n", " outp_source.create_key(\n", " \"data.trainId\", data=seq_dc.train_ids, chunks=min(50, len(seq_dc.train_ids)))\n", " if pattern_classification:\n", " # Add corrected `data.image.pixels_classified` and `data.image.patterns` datasets.\n", " outp_source.create_key(\n", " \"data.image.pixels_classified\", data=data_clu, chunks=dataset_chunk)\n", " outp_source.create_key(\n", " \"data.image.patterns\", data=data_patterns, chunks=dataset_chunk)\n", "\n", " step_timer.done_step('Storing data.')\n", "if empty_seq == len(seq_files):\n", " warning(\"No valid trains for RAW data to correct.\")\n", " exit(0)" ] }, 
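{ "cell_type": "markdown", "metadata": {}, "source": [ "## Reading back corrected data\n", "\n", "As a quick sanity check, the corrected frames can be read back from the CORR files written above. The next cell is a minimal sketch: it assumes at least one CORR file exists in `out_folder` and only inspects the shape of the corrected `data.image.pixels` dataset." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Minimal sanity-check sketch: read corrected frames back from the first CORR file.\n", "# Assumes at least one sequence file was corrected and written above.\n", "corr_files = sorted(out_folder.glob(f'CORR-R{run:04d}-*.h5'))\n", "if corr_files:\n", " corr_dc = H5File(corr_files[0])\n", " pixels = corr_dc[instrument_src, 'data.image.pixels'].ndarray()\n", " print(f'{corr_files[0].name}: corrected pixel data of shape {pixels.shape}')" ] }, 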
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ho, eo, co, so = histCalCor.get()\n", "\n", "d = [{\n", " 'x': co,\n", " 'y': ho,\n", " 'y_err': np.sqrt(ho[:]),\n", " 'drawstyle': 'steps-mid',\n", " 'errorstyle': 'bars',\n", " 'errorcoarsing': 2,\n", " 'label': 'Total corr.'\n", "}]\n", "\n", "ho, eo, co, so = histCalOffsetCor.get()\n", "\n", "d.append({\n", " 'x': co,\n", " 'y': ho,\n", " 'y_err': np.sqrt(ho[:]),\n", " 'drawstyle': 'steps-mid',\n", " 'errorstyle': 'bars',\n", " 'errorcoarsing': 2,\n", " 'label': 'Offset corr.'\n", "})\n", "\n", "if common_mode:\n", " ho, eo, co, so = histCalCMCor.get()\n", " d.append({\n", " 'x': co,\n", " 'y': ho,\n", " 'y_err': np.sqrt(ho[:]),\n", " 'drawstyle': 'steps-mid',\n", " 'errorstyle': 'bars',\n", " 'errorcoarsing': 2,\n", " 'label': 'CM corr.'\n", " })\n", " \n", "if relative_gain:\n", " ho, eo, co, so = histCalRelGainCor.get()\n", " d.append({\n", " 'x': co,\n", " 'y': ho,\n", " 'y_err': np.sqrt(ho[:]),\n", " 'drawstyle': 'steps-mid',\n", " 'errorstyle': 'bars',\n", " 'errorcoarsing': 2,\n", " 'label': 'Relative gain corr.'\n", " })\n", "\n", "if pattern_classification:\n", " ho, eo, co, so = histCalCSCor.get()\n", " d.append({\n", " 'x': co,\n", " 'y': ho,\n", " 'y_err': np.sqrt(ho[:]),\n", " 'drawstyle': 'steps-mid',\n", " 'errorstyle': 'bars',\n", " 'errorcoarsing': 2,\n", " 'label': 'Charge sharing corr.'\n", " })\n", "\n", "fig = xana.simplePlot(\n", " d, aspect=1, x_label='Energy (ADU)',\n", " y_label='Number of occurrences', figsize='2col',\n", " y_log=True, x_range=(-50, 500),\n", " legend='top-center-frame-2col',\n", ")\n", "plt.title(f'run {run} - {karabo_da}')\n", "plt.grid()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if absolute_gain:\n", " d = []\n", " ho, eo, co, so = histCalAbsGainCor.get()\n", " d.append({\n", " 'x': co,\n", " 'y': ho,\n", " 'y_err': np.sqrt(ho[:]),\n", " 'drawstyle': 'steps-mid',\n", " 'errorstyle': 'bars',\n", " 'errorcoarsing': 2,\n", " 'label': 'Absolute gain corr.'\n", " })\n", "\n", " if pattern_classification:\n", " ho, eo, co, so = histCalGainCorClusters.get()\n", " d.append({\n", " 'x': co,\n", " 'y': ho,\n", " 'y_err': np.sqrt(ho[:]),\n", " 'drawstyle': 'steps-mid',\n", " 'errorstyle': 'bars',\n", " 'errorcoarsing': 2,\n", " 'label': 'Charge sharing corr.'\n", " })\n", " \n", " ho, eo, co, so = histCalGainCorSingles.get()\n", " d.append({\n", " 'x': co,\n", " 'y': ho,\n", " 'y_err': np.sqrt(ho[:]),\n", " 'drawstyle': 'steps-mid',\n", " 'errorstyle': 'bars',\n", " 'errorcoarsing': 2,\n", " 'label': 'Isolated photons (singles)'\n", " })\n", " \n", " fig = xana.simplePlot(\n", " d, aspect=1, x_label=f'Energy ({plot_unit})',\n", " y_label='Number of occurrences', figsize='2col',\n", " y_log=True, \n", " x_range=np.array((-50, 500))*hscale,\n", " legend='top-center-frame-2col',\n", " )\n", " plt.grid()\n", " plt.title(f'run {run} - {karabo_da}')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Median image of the corrected data" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "step_timer.start()\n", "fig = xana.heatmapPlot(\n", " np.nanmedian(data, axis=0),\n", " x_label='Columns', y_label='Rows',\n", " lut_label=f'Signal ({plot_unit})',\n", " x_range=(0, y),\n", " y_range=(0, x),\n", " vmin=-50, vmax=50)\n", "step_timer.done_step(f'Plotting median image of {data.shape[0]} trains.')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Single shot of the corrected data" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "step_timer.start()\n", "fig = xana.heatmapPlot(\n", " data[0, ...],\n", " x_label='Columns', y_label='Rows',\n", " lut_label=f'Signal ({plot_unit})',\n", " x_range=(0, y),\n", " y_range=(0, x),\n", " vmin=-50, vmax=50)\n", "step_timer.done_step('Plotting single shot of corrected data.')" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.12" }, "latex_envs": { "LaTeX_envs_menu_present": true, "autocomplete": true, "bibliofile": "biblio.bib", "cite_by": "apalike", "current_citInitial": 1, "eqLabelWithNumbers": true, "eqNumInitial": 1, "hotkeys": { "equation": "Ctrl-E", "itemize": "Ctrl-I" }, "labels_anchors": false, "latex_user_defs": false, "report_style_numbering": false, "user_envs_cfg": false } }, "nbformat": 4, "nbformat_minor": 4 }