{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# ePix100 Data Correction\n",
    "\n",
    "Author: European XFEL Detector Group, Version: 2.0\n",
    "\n",
    "The following notebook provides data correction of images acquired with the ePix100 detector."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "in_folder = \"/gpfs/exfel/exp/CALLAB/202031/p900113/raw\"  # input folder, required\n",
    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/remove/epix_correct\"  # output folder, required\n",
    "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
    "sequences = [-1]  # sequences to correct, set to -1 for all, range allowed\n",
    "sequences_per_node = 1  # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n",
    "run = 9988  # which run to read data from, required\n",
    "\n",
    "# Parameters for accessing the raw data.\n",
    "karabo_id = \"MID_EXP_EPIX-1\"  # karabo karabo_id\n",
    "karabo_da = \"EPIX01\"  # data aggregators\n",
    "db_module = \"\"  # module id in the database\n",
    "receiver_template = \"RECEIVER\"  # detector receiver template for accessing raw data files\n",
    "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5'  # the template to use to access data\n",
    "instrument_source_template = '{}/DET/{}:daqOutput'  # instrument detector data source in h5files\n",
    "\n",
    "# Parameters affecting writing corrected data.\n",
    "chunk_size_idim = 1  # H5 chunking size of output data\n",
    "\n",
    "# Only for testing\n",
    "limit_images = 0  # ONLY FOR TESTING. process only first N images, 0 - process all.\n",
    "\n",
    "# Parameters for the calibration database.\n",
    "cal_db_interface = \"tcp://max-exfl016:8015#8025\"  # calibration DB interface to use\n",
    "cal_db_timeout = 300000  # timeout on caldb requests\n",
    "creation_time = \"\"  # The timestamp to use with Calibration DBe. Required Format: \"YYYY-MM-DD hh:mm:ss\" e.g. 2019-07-04 11:02:41\n",
    "\n",
    "# Conditions for retrieving calibration constants.\n",
    "bias_voltage = 200  # bias voltage\n",
    "in_vacuum = False  # detector operated in vacuum\n",
    "fix_temperature = 290.  # fix temperature to this value\n",
    "gain_photon_energy = 9.0  # Photon energy used for gain calibration\n",
    "photon_energy = 0.  # Photon energy to calibrate in number of photons, 0 for calibration in keV\n",
    "\n",
    "# Flags to select type of applied corrections.\n",
    "pattern_classification = True  # do clustering.\n",
    "relative_gain = True  # Apply relative gain correction.\n",
    "absolute_gain = True  # Apply absolute gain correction (implies relative gain).\n",
    "common_mode = True  # Apply common mode correction.\n",
    "\n",
    "# Parameters affecting applied correction.\n",
    "cm_min_frac = 0.25  # No CM correction is performed if after masking the ratio of good pixels falls below this \n",
    "cm_noise_sigma = 5.  # CM correction noise standard deviation\n",
    "split_evt_primary_threshold = 7.  # primary threshold for split event correction\n",
    "split_evt_secondary_threshold = 5.  # secondary threshold for split event correction\n",
    "split_evt_mip_threshold = 1000.  # minimum ionizing particle threshold\n",
    "\n",
    "\n",
    "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n",
    "    from xfel_calibrate.calibrate import balance_sequences as bs\n",
    "    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tabulate\n",
    "import warnings\n",
    "\n",
    "import h5py\n",
    "import pasha as psh\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from IPython.display import Latex, display\n",
    "from extra_data import RunDirectory, H5File\n",
    "from pathlib import Path\n",
    "\n",
    "from XFELDetAna import xfelpyanatools as xana\n",
    "from XFELDetAna import xfelpycaltools as xcal\n",
    "from cal_tools import h5_copy_except\n",
    "from cal_tools.tools import (\n",
    "    calcat_creation_time,\n",
    "    get_dir_creation_date,\n",
    "    get_constant_from_db,\n",
    "    read_const_yaml,\n",
    "    CalibrationMetadata,\n",
    ")\n",
    "from cal_tools.step_timing import StepTimer\n",
    "from iCalibrationDB import (\n",
    "    Conditions,\n",
    "    Constants,\n",
    ")\n",
    "\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "prettyPlotting = True\n",
    "\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 708  # rows of the ePix100\n",
    "y = 768  # columns of the ePix100\n",
    "\n",
    "if absolute_gain:\n",
    "    relative_gain = True\n",
    "\n",
    "plot_unit = 'ADU'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "in_folder = Path(in_folder)\n",
    "out_folder = Path(out_folder)\n",
    "\n",
    "out_folder.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "run_folder = in_folder / f\"r{run:04d}\"\n",
    "\n",
    "instrument_src = instrument_source_template.format(\n",
    "    karabo_id, receiver_template)\n",
    "\n",
    "print(f\"Correcting run: {run_folder}\")\n",
    "print(f\"Instrument H5File source: {instrument_src}\")\n",
    "print(f\"Data corrected files are stored at: {out_folder}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
    "print(f\"Using {creation_time.isoformat()} as creation time\")\n",
    "\n",
    "metadata = CalibrationMetadata(metadata_folder or out_folder)\n",
    "# Constant paths are saved under retrieved-constants in calibration_metadata.yml.\n",
    "# NOTE: this notebook shouldn't overwrite calibration metadata file.\n",
    "const_yaml = metadata.get(\"retrieved-constants\", {})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "run_dc = RunDirectory(run_folder, _use_voview=False)\n",
    "\n",
    "seq_files = [Path(f.filename) for f in run_dc.select(f\"*{karabo_id}*\").files]\n",
    "\n",
    "# If a set of sequences requested to correct,\n",
    "# adapt seq_files list.\n",
    "if sequences != [-1]:\n",
    "    seq_files = [f for f in seq_files if any(f.match(f\"*-S{s:05d}.h5\") for s in sequences)]\n",
    "\n",
    "if not len(seq_files):\n",
    "    raise IndexError(\"No sequence files available for the selected sequences.\")\n",
    "\n",
    "print(f\"Processing a total of {len(seq_files)} sequence files\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "step_timer = StepTimer()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "step_timer.start()\n",
    "\n",
    "sensorSize = [x, y]\n",
    "# Sensor area will be analysed according to blocksize\n",
    "blockSize = [sensorSize[0]//2, sensorSize[1]//2]\n",
    "xcal.defaultBlockSize = blockSize\n",
    "memoryCells = 1  # ePIX has no memory cells\n",
    "run_parallel = False\n",
    "\n",
    "# Read control data.\n",
    "integration_time = int(run_dc.get_run_value(\n",
    "    f\"{karabo_id}/DET/CONTROL\",\n",
    "    \"expTime.value\"))\n",
    "temperature = np.mean(run_dc.get_array(\n",
    "    f\"{karabo_id}/DET/{receiver_template}:daqOutput\",\n",
    "    f\"data.backTemp\").values) / 100.\n",
    "\n",
    "if fix_temperature != 0:\n",
    "    temperature_k = fix_temperature\n",
    "    print(\"Temperature is fixed!\")\n",
    "else:\n",
    "    temperature_k = temperature + 273.15\n",
    "\n",
    "print(f\"Bias voltage is {bias_voltage} V\")\n",
    "print(f\"Detector integration time is set to {integration_time}\")\n",
    "print(\n",
    "    f\"Mean temperature was {temperature:0.2f} °C \"\n",
    "    f\"/ {temperature_k:0.2f} K at beginning of the run.\"\n",
    ")\n",
    "print(f\"Operated in vacuum: {in_vacuum} \")\n",
    "\n",
    "step_timer.done_step(f'Reading control parameters.')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Table of sequence files to process\n",
    "table = [(k, f) for k, f in enumerate(seq_files)]\n",
    "\n",
    "if len(table):\n",
    "    md = display(Latex(tabulate.tabulate(\n",
    "        table,\n",
    "        tablefmt='latex',\n",
    "        headers=[\"#\", \"file\"]\n",
    "    )))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Retrieving calibration constants\n",
    "\n",
    "As a first step, dark maps have to be loaded."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "cond_dict = {\n",
    "    \"bias_voltage\": bias_voltage,\n",
    "    \"integration_time\": integration_time,\n",
    "    \"temperature\": temperature_k,\n",
    "    \"in_vacuum\": in_vacuum,\n",
    "}\n",
    "\n",
    "dark_condition = Conditions.Dark.ePix100(**cond_dict)\n",
    "\n",
    "# update conditions with illuminated conditins.\n",
    "cond_dict.update({\n",
    "        \"photon_energy\": gain_photon_energy\n",
    "    })\n",
    "\n",
    "illum_condition = Conditions.Illuminated.ePix100(**cond_dict)\n",
    "\n",
    "const_cond = {\n",
    "    \"Offset\": dark_condition,\n",
    "    \"Noise\": dark_condition,\n",
    "    \"RelativeGain\": illum_condition,\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if const_yaml:  #  Used while reproducing corrected data.\n",
    "    print(f\"Using stored constants in {metadata.filename}\")\n",
    "    const_data, _ = read_const_yaml(const_yaml[karabo_da][\"constants\"])\n",
    "else:  # First correction attempt.\n",
    "    const_data = dict()\n",
    "    for cname, condition in const_cond.items():\n",
    "        # Avoid retrieving RelativeGain, if not needed for correction.\n",
    "        if cname == \"RelativeGain\" and not relative_gain:\n",
    "            const_data[cname] = None\n",
    "        else:\n",
    "            const_data[cname] = get_constant_from_db(\n",
    "                karabo_id=karabo_id,\n",
    "                karabo_da=karabo_da,\n",
    "                constant=getattr(Constants.ePix100, cname)(),\n",
    "                condition=condition,\n",
    "                empty_constant=None,\n",
    "                cal_db_interface=cal_db_interface,\n",
    "                creation_time=creation_time,\n",
    "                print_once=2,\n",
    "                timeout=cal_db_timeout\n",
    "    )"
   ]
  },
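  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick, optional sanity check, the cell below prints the shape and median of each retrieved map. This is an illustrative addition; it only assumes that each retrieved constant is a numpy array covering the sensor."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check of the retrieved constants (illustrative only).\n",
    "for cname, cdata in const_data.items():\n",
    "    if cdata is None:\n",
    "        print(f\"{cname}: not retrieved\")\n",
    "    else:\n",
    "        print(f\"{cname}: shape={np.shape(cdata)}, median={np.median(cdata):.3f}\")"
   ]
  },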
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if relative_gain and const_data.get(\"RelativeGain\", None) is None:\n",
    "    print(\n",
    "        \"WARNING: RelativeGain map is requested, but not found.\\n\"\n",
    "        \"No gain correction will be applied\"\n",
    "    )\n",
    "    relative_gain = False\n",
    "    absolute_gain = False\n",
    "\n",
    "# Initializing some parameters.\n",
    "hscale = 1\n",
    "stats = True\n",
    "hrange = np.array([-50, 1000])\n",
    "nbins = hrange[1] - hrange[0]\n",
    "commonModeBlockSize = [x//2, y//2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "histCalOffsetCor = xcal.HistogramCalculator(\n",
    "    sensorSize,\n",
    "    bins=nbins,\n",
    "    range=hrange,\n",
    "    parallel=run_parallel,\n",
    "    nCells=memoryCells,\n",
    "    blockSize=blockSize\n",
    ")\n",
    "\n",
    "\n",
    "# *****************Histogram Calculators****************** #\n",
    "histCalCor = xcal.HistogramCalculator(\n",
    "    sensorSize,\n",
    "    bins=1050,\n",
    "    range=[-50, 1000],\n",
    "    parallel=run_parallel,\n",
    "    nCells=memoryCells,\n",
    "    blockSize=blockSize\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if common_mode:\n",
    "    histCalCMCor = xcal.HistogramCalculator(\n",
    "        sensorSize,\n",
    "        bins=nbins,\n",
    "        range=hrange,\n",
    "        parallel=run_parallel,\n",
    "        nCells=memoryCells,\n",
    "        blockSize=blockSize,\n",
    "    )\n",
    "    cmCorrectionB = xcal.CommonModeCorrection(\n",
    "        shape=sensorSize,\n",
    "        blockSize=commonModeBlockSize, \n",
    "        orientation='block',\n",
    "        nCells=memoryCells, \n",
    "        noiseMap=const_data['Noise'],\n",
    "        runParallel=run_parallel,\n",
    "        parallel=run_parallel,\n",
    "        stats=stats,\n",
    "        minFrac=cm_min_frac,\n",
    "        noiseSigma=cm_noise_sigma,\n",
    "    )\n",
    "    cmCorrectionR = xcal.CommonModeCorrection(\n",
    "        shape=sensorSize, \n",
    "        blockSize=commonModeBlockSize, \n",
    "        orientation='row',\n",
    "        nCells=memoryCells, \n",
    "        noiseMap=const_data['Noise'],\n",
    "        runParallel=run_parallel,\n",
    "        parallel=run_parallel,\n",
    "        stats=stats,\n",
    "        minFrac=cm_min_frac,\n",
    "        noiseSigma=cm_noise_sigma,\n",
    "    )\n",
    "    cmCorrectionC = xcal.CommonModeCorrection(\n",
    "        shape=sensorSize, \n",
    "        blockSize=commonModeBlockSize, \n",
    "        orientation='col',\n",
    "        nCells=memoryCells, \n",
    "        noiseMap=const_data['Noise'],\n",
    "        runParallel=run_parallel,\n",
    "        parallel=run_parallel,\n",
    "        stats=stats,\n",
    "        minFrac=cm_min_frac,\n",
    "        noiseSigma=cm_noise_sigma,\n",
    "    )"
   ]
  },
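  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For intuition, the following toy numpy sketch shows the idea behind a row-wise common mode correction: a robust per-row baseline is estimated with the median and subtracted. It is not the xcal implementation, which additionally masks pixels using the noise map (`cm_noise_sigma`) and skips the correction if too few good pixels remain (`cm_min_frac`)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Toy sketch of row-wise common mode correction (not the xcal code).\n",
    "rng = np.random.default_rng(42)\n",
    "demo = rng.normal(0, 5, size=(8, 8)).astype(np.float32)  # pure-noise frame\n",
    "demo += np.arange(8, dtype=np.float32)[:, None]  # inject a per-row common offset\n",
    "row_cm = np.median(demo, axis=1, keepdims=True)  # robust per-row baseline\n",
    "demo -= row_cm  # subtract the common mode\n",
    "print(np.round(np.median(demo, axis=1), 3))  # per-row medians are now 0"
   ]
  },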
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if relative_gain:\n",
    "    gain_cnst = np.median(const_data[\"RelativeGain\"])\n",
    "    hscale = gain_cnst\n",
    "    plot_unit = 'keV'\n",
    "    if photon_energy > 0:\n",
    "        plot_unit = '$\\gamma$'\n",
    "        hscale /= photon_energy\n",
    "    \n",
    "    gainCorrection = xcal.RelativeGainCorrection(\n",
    "        sensorSize,\n",
    "        gain_cnst/const_data[\"RelativeGain\"][..., None],\n",
    "        nCells=memoryCells,\n",
    "        parallel=run_parallel,\n",
    "        blockSize=blockSize,\n",
    "        gains=None,\n",
    "    )\n",
    "\n",
    "    histCalRelGainCor = xcal.HistogramCalculator(\n",
    "        sensorSize,\n",
    "        bins=nbins,\n",
    "        range=hrange,\n",
    "        parallel=run_parallel,\n",
    "        nCells=memoryCells,\n",
    "        blockSize=blockSize\n",
    "    )\n",
    "    \n",
    "    if absolute_gain:\n",
    "        histCalAbsGainCor = xcal.HistogramCalculator(\n",
    "            sensorSize,\n",
    "            bins=nbins,\n",
    "            range=hrange*hscale,\n",
    "            parallel=run_parallel,\n",
    "            nCells=memoryCells,\n",
    "            blockSize=blockSize\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if pattern_classification :\n",
    "    patternClassifier = xcal.PatternClassifier(\n",
    "        [x, y],\n",
    "        const_data[\"Noise\"],\n",
    "        split_evt_primary_threshold,\n",
    "        split_evt_secondary_threshold,\n",
    "        split_evt_mip_threshold,\n",
    "        tagFirstSingles=0,\n",
    "        nCells=memoryCells,\n",
    "        allowElongated=False,\n",
    "        blockSize=[x, y],\n",
    "        parallel=run_parallel,\n",
    "    )\n",
    "    histCalSECor = xcal.HistogramCalculator(\n",
    "        sensorSize,\n",
    "        bins=nbins,\n",
    "        range=hrange,\n",
    "        parallel=run_parallel,\n",
    "        nCells=memoryCells,\n",
    "        blockSize=blockSize,\n",
    "    )\n",
    "    histCalGainCorSingles = xcal.HistogramCalculator(\n",
    "        sensorSize,\n",
    "        bins=nbins,\n",
    "        range=hrange*hscale,\n",
    "        parallel=run_parallel,\n",
    "        nCells=memoryCells,\n",
    "        blockSize=blockSize\n",
    "    )"
   ]
  },
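  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The two split-event thresholds can be pictured with a toy example: a pixel seeds an event if it exceeds `split_evt_primary_threshold` times the local noise, and neighbouring pixels above `split_evt_secondary_threshold` times the noise are counted as part of the same (split) photon event. The sketch below uses plain numpy with made-up values and is not the `PatternClassifier` algorithm itself."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Toy sketch of the split-event thresholds (not the xcal PatternClassifier).\n",
    "noise_demo = np.full((5, 5), 3.0, dtype=np.float32)  # made-up flat noise map\n",
    "frame_demo = np.zeros((5, 5), dtype=np.float32)\n",
    "frame_demo[2, 2] = 40.0  # seed pixel: above 7 * 3 = 21 ADU\n",
    "frame_demo[2, 3] = 18.0  # neighbour: above 5 * 3 = 15 ADU, below the primary cut\n",
    "seeds = frame_demo > split_evt_primary_threshold * noise_demo\n",
    "members = frame_demo > split_evt_secondary_threshold * noise_demo\n",
    "print(\"seed pixels: \", np.argwhere(seeds).tolist())\n",
    "print(\"event pixels:\", np.argwhere(members).tolist())"
   ]
  },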
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Applying corrections"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def correct_train(wid, index, tid, d):\n",
    "\n",
    "    d = d[pixel_data[0]][pixel_data[1]][..., np.newaxis].astype(np.float32)\n",
    "    d = np.compress(\n",
    "        np.any(d > 0, axis=(0, 1)), d, axis=2)\n",
    "    \n",
    "    # Offset correction.\n",
    "    d -= const_data[\"Offset\"]\n",
    "\n",
    "    histCalOffsetCor.fill(d)\n",
    "    # Common Mode correction.\n",
    "    if common_mode:\n",
    "        # Block CM\n",
    "        d = cmCorrectionB.correct(d)\n",
    "        # Row CM\n",
    "        d = cmCorrectionR.correct(d)\n",
    "        # COL CM\n",
    "        d = cmCorrectionC.correct(d)\n",
    "        histCalCMCor.fill(d)\n",
    "\n",
    "    # relative gain correction.\n",
    "    if relative_gain:\n",
    "        d = gainCorrection.correct(d)\n",
    "        histCalRelGainCor.fill(d)\n",
    "\n",
    "    data[index, ...] = np.squeeze(d)\n",
    "\n",
    "    \"\"\"The gain correction is currently applying\n",
    "    an absolute correction (not a relative correction\n",
    "    as the implied by the name);\n",
    "    it changes the scale (the unit of measurement)\n",
    "    of the data from ADU to either keV or n_of_photons.\n",
    "    But the pattern classification relies on comparing\n",
    "    data with the noise map, which is still in ADU.\n",
    "\n",
    "    The best solution is to do a relative gain\n",
    "    correction first and apply the global absolute\n",
    "    gain to the data at the end, after clustering.\n",
    "    \"\"\"\n",
    "\n",
    "    if pattern_classification:\n",
    "\n",
    "        d_clu, patterns = patternClassifier.classify(d)\n",
    "\n",
    "        d_clu[d_clu < (split_evt_primary_threshold*const_data[\"Noise\"])] = 0\n",
    "\n",
    "        data_patterns[index, ...] = np.squeeze(patterns)\n",
    "\n",
    "        data_clu[index, ...] = np.squeeze(d_clu)\n",
    "\n",
    "        d_clu[patterns != 100] = np.nan\n",
    "\n",
    "        histCalSECor.fill(d_clu)\n",
    "\n",
    "    # absolute gain correction\n",
    "    # changes data from ADU to keV (or n. of photons)\n",
    "    if absolute_gain:\n",
    "\n",
    "        d = d * gain_cnst\n",
    "        if photon_energy > 0:\n",
    "            d /= photon_energy\n",
    "        histCalAbsGainCor.fill(d)\n",
    "\n",
    "        if pattern_classification:\n",
    "            # Modify pattern classification.\n",
    "            d_clu = d_clu * gain_cnst\n",
    "            if photon_energy > 0:\n",
    "                d_clu /= photon_energy\n",
    "\n",
    "            histCalGainCorSingles.fill(d_clu)\n",
    "\n",
    "            data_clu[index, ...] = np.squeeze(d_clu)\n",
    "\n",
    "    histCalCor.fill(d)"
   ]
  },
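  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The unit flow described in the comments above can be followed with a small worked example. The gain value below is made up purely for illustration; the actual factor is `gain_cnst`, the median of the retrieved `RelativeGain` map."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Worked example of the ADU -> keV -> photons unit flow (illustrative numbers).\n",
    "gain_demo = 0.01  # hypothetical keV/ADU factor, NOT the retrieved gain_cnst\n",
    "signal_adu = 900.0  # an offset-corrected pixel value in ADU\n",
    "signal_kev = signal_adu * gain_demo\n",
    "print(f\"{signal_adu} ADU -> {signal_kev:.1f} keV \"\n",
    "      f\"-> {signal_kev / gain_photon_energy:.1f} photons at {gain_photon_energy} keV\")"
   ]
  },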
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pixel_data = (instrument_src, \"data.image.pixels\")\n",
    "\n",
    "# 10 is a number chosen after testing 1 ... 71 parallel threads\n",
    "context = psh.context.ThreadContext(num_workers=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "for f in seq_files:\n",
    "\n",
    "    seq_dc = H5File(f)\n",
    "\n",
    "    n_imgs = seq_dc.get_data_counts(*pixel_data).shape[0]\n",
    "\n",
    "    # Data shape in seq_dc excluding trains with empty images. \n",
    "    dshape = seq_dc[pixel_data].shape\n",
    "    dataset_chunk = ((chunk_size_idim,) + dshape[1:])  # e.g. (1, pixels_x, pixels_y) \n",
    "\n",
    "    if n_imgs - dshape[0] != 0:\n",
    "        print(f\"- WARNING: {f} has {n_imgs - dshape[0]} trains with empty data.\")\n",
    "\n",
    "    # This parameter is only used for testing.\n",
    "    if limit_images > 0:\n",
    "        n_imgs = min(n_imgs, limit_images)\n",
    "\n",
    "    data = context.alloc(shape=dshape, dtype=np.float32)\n",
    "\n",
    "    if pattern_classification:\n",
    "        data_clu = context.alloc(shape=dshape, dtype=np.float32)\n",
    "        data_patterns = context.alloc(shape=dshape, dtype=np.int32)\n",
    "\n",
    "    step_timer.start()\n",
    "\n",
    "    context.map(\n",
    "        correct_train, seq_dc.select(\n",
    "            *pixel_data, require_all=True).select_trains(np.s_[:n_imgs])\n",
    "    )\n",
    "    step_timer.done_step(f'Correcting {n_imgs} trains.')\n",
    "\n",
    "    # Store detector h5 information in the corrected file\n",
    "    # and deselect data to correct and store later.\n",
    "    step_timer.start()\n",
    "\n",
    "    out_file = out_folder / f.name.replace(\"RAW\", \"CORR\")\n",
    "    data_path = \"INSTRUMENT/\"+instrument_src+\"/data/image\"\n",
    "    pixels_path = f\"{data_path}/pixels\"\n",
    "    \n",
    "    # First copy all raw data source to the corrected file,\n",
    "    # while excluding the raw data image /data/image/pixels.\n",
    "    with h5py.File(out_file, 'w') as ofile:\n",
    "        # Copy RAW non-calibrated sources.\n",
    "        with h5py.File(f, 'r') as sfile:\n",
    "            h5_copy_except.h5_copy_except_paths(\n",
    "                sfile, ofile,\n",
    "                [pixels_path])\n",
    "\n",
    "        # Create dataset in CORR h5 file and add corrected images.\n",
    "        dataset = ofile.create_dataset(\n",
    "            pixels_path,\n",
    "            data=data,\n",
    "            chunks=dataset_chunk,\n",
    "            dtype=np.float32)\n",
    "\n",
    "        if pattern_classification:\n",
    "            # Save /data/image//pixels_classified in corrected file.\n",
    "            datasetc = ofile.create_dataset(\n",
    "                f\"{data_path}/pixels_classified\",\n",
    "                data=data_clu,\n",
    "                chunks=dataset_chunk,\n",
    "                dtype=np.float32)\n",
    "\n",
    "            # Save /data/image//patterns in corrected file.\n",
    "            datasetp = ofile.create_dataset(\n",
    "                f\"{data_path}/patterns\",\n",
    "                data=data_patterns,\n",
    "                chunks=dataset_chunk,\n",
    "                dtype=np.int32)\n",
    "\n",
    "        step_timer.done_step('Storing data.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ho, eo, co, so = histCalCor.get()\n",
    "\n",
    "d = [{\n",
    "    'x': co,\n",
    "    'y': ho,\n",
    "    'y_err': np.sqrt(ho[:]),\n",
    "    'drawstyle': 'steps-mid',\n",
    "    'errorstyle': 'bars',\n",
    "    'errorcoarsing': 2,\n",
    "    'label': 'Total corr.'\n",
    "}]\n",
    "\n",
    "ho, eo, co, so = histCalOffsetCor.get()\n",
    "\n",
    "d.append({\n",
    "    'x': co,\n",
    "    'y': ho,\n",
    "    'y_err': np.sqrt(ho[:]),\n",
    "    'drawstyle': 'steps-mid',\n",
    "    'errorstyle': 'bars',\n",
    "    'errorcoarsing': 2,\n",
    "    'label': 'Offset corr.'\n",
    "})\n",
    "\n",
    "if common_mode:\n",
    "    ho, eo, co, so = histCalCMCor.get()\n",
    "    d.append({\n",
    "        'x': co,\n",
    "        'y': ho,\n",
    "        'y_err': np.sqrt(ho[:]),\n",
    "        'drawstyle': 'steps-mid',\n",
    "        'errorstyle': 'bars',\n",
    "        'errorcoarsing': 2,\n",
    "        'label': 'CM corr.'\n",
    "    })\n",
    "    \n",
    "if relative_gain :\n",
    "    ho, eo, co, so = histCalRelGainCor.get()\n",
    "    d.append({\n",
    "        'x': co,\n",
    "        'y': ho,\n",
    "        'y_err': np.sqrt(ho[:]),\n",
    "        'drawstyle': 'steps-mid',\n",
    "        'errorstyle': 'bars',\n",
    "        'errorcoarsing': 2,\n",
    "        'label': 'Relative gain corr.'\n",
    "    })\n",
    "\n",
    "\n",
    "if pattern_classification:\n",
    "    ho, eo, co, so = histCalSECor.get()\n",
    "    d.append({\n",
    "        'x': co,\n",
    "        'y': ho,\n",
    "        'y_err': np.sqrt(ho[:]),\n",
    "        'drawstyle': 'steps-mid',\n",
    "        'errorstyle': 'bars',\n",
    "        'errorcoarsing': 2,\n",
    "        'label': 'Isolated photons (singles)'\n",
    "    })\n",
    "\n",
    "fig = xana.simplePlot(\n",
    "    d, aspect=1, x_label=f'Energy (ADU)',\n",
    "    y_label='Number of occurrences', figsize='2col',\n",
    "    y_log=True, x_range=(-50, 500),\n",
    "    legend='top-center-frame-2col',\n",
    ")\n",
    "plt.title(f'run {run} - {karabo_da}')\n",
    "plt.grid()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if absolute_gain :\n",
    "    d=[]\n",
    "    ho, eo, co, so = histCalAbsGainCor.get()\n",
    "    d.append({\n",
    "        'x': co,\n",
    "        'y': ho,\n",
    "        'y_err': np.sqrt(ho[:]),\n",
    "        'drawstyle': 'steps-mid',\n",
    "        'errorstyle': 'bars',\n",
    "        'errorcoarsing': 2,\n",
    "        'label': 'Absolute gain corr.'\n",
    "    })\n",
    "\n",
    "    if pattern_classification:\n",
    "        ho, eo, co, so = histCalGainCorSingles.get()\n",
    "        d.append({\n",
    "            'x': co,\n",
    "            'y': ho,\n",
    "            'y_err': np.sqrt(ho[:]),\n",
    "            'drawstyle': 'steps-mid',\n",
    "            'errorstyle': 'bars',\n",
    "            'errorcoarsing': 2,\n",
    "            'label': 'Isolated photons (singles)'\n",
    "        })\n",
    "\n",
    "    fig = xana.simplePlot(\n",
    "        d, aspect=1, x_label=f'Energy ({plot_unit})',\n",
    "        y_label='Number of occurrences', figsize='2col',\n",
    "        y_log=True, \n",
    "        x_range=np.array((-50, 500))*hscale,\n",
    "        legend='top-center-frame-2col',\n",
    "    )\n",
    "    plt.grid()\n",
    "    plt.title(f'run {run} - {karabo_da}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Mean Image of the corrected data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "step_timer.start()\n",
    "fig = xana.heatmapPlot(\n",
    "    np.nanmedian(data, axis=0),\n",
    "    x_label='Columns', y_label='Rows',\n",
    "    lut_label=f'Signal ({plot_unit})',\n",
    "    x_range=(0, y),\n",
    "    y_range=(0, x),\n",
    "    vmin=-50, vmax=50)\n",
    "step_timer.done_step(f'Plotting mean image of {data.shape[0]} trains.')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Single Shot of the corrected data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "step_timer.start()\n",
    "fig = xana.heatmapPlot(\n",
    "    data[0, ...],\n",
    "    x_label='Columns', y_label='Rows',\n",
    "    lut_label=f'Signal ({plot_unit})',\n",
    "    x_range=(0, y),\n",
    "    y_range=(0, x),\n",
    "    vmin=-50, vmax=50)\n",
    "step_timer.done_step(f'Plotting single shot of corrected data.')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.11"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}