{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# AGIPD Characterize Dark Images #\n",
    "\n",
    "Author: S. Hauf, Version: 0.1\n",
    "\n",
    "The following code analyzes a set of dark images taken with the AGIPD detector to deduce detector offsets , noise, bad-pixel maps and thresholding. All four types of constants are evaluated per-pixel and per-memory cell. Data for the detector's three gain stages needs to be present, separated into separate runs.\n",
    "\n",
    "The evaluated calibration constants are stored locally and injected in the calibration data base.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-02-20T12:42:51.255184Z",
     "start_time": "2019-02-20T12:42:51.225500Z"
    }
   },
   "outputs": [],
   "source": [
    "cluster_profile = \"noDB\" # The ipcluster profile to use\n",
    "in_folder = \"/gpfs/exfel/d/raw/SPB/202030/p900138\" # path to input data, required\n",
    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/AGIPDbad_sep64\" # path to output to, required\n",
    "sequences = [0] # sequence files to evaluate.\n",
    "modules = [-1]  # list of modules to evaluate, RANGE ALLOWED\n",
    "run_high = 167 # run number in which high gain data was recorded, required\n",
    "run_med = 168 # run number in which medium gain data was recorded, required\n",
    "run_low = 169 # run number in which low gain data was recorded, required\n",
    "\n",
    "karabo_id = \"SPB_DET_AGIPD1M-1\" # karabo karabo_id\n",
    "karabo_da = ['-1']  # a list of data aggregators names, Default [-1] for selecting all data aggregators\n",
    "receiver_id = \"{}CH0\" # inset for receiver devices\n",
    "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data\n",
    "h5path = '/INSTRUMENT/{}/DET/{}:xtdf/image' # path in the HDF5 file to images\n",
    "h5path_idx = '/INDEX/{}/DET/{}:xtdf/image' # path in the HDF5 file to images\n",
    "h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP_TEST' # path to control information\n",
    "karabo_id_control = \"SPB_IRU_AGIPD1M1\" # karabo-id for control device '\n",
    "karabo_da_control = \"AGIPD1MCTRL00\" # karabo DA for control infromation\n",
    "\n",
    "use_dir_creation_date = True  # use dir creation date as data production reference date\n",
    "cal_db_interface = \"tcp://max-exfl016:8020\" # the database interface to use\n",
    "cal_db_timeout = 3000000 # timeout on caldb requests\"\n",
    "local_output = True # output constants locally\n",
    "db_output = False # output constants to database\n",
    "\n",
    "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
    "bias_voltage = 300 # detector bias voltage\n",
    "gain_setting = 0.1 # the gain setting, use 0.1 to try to auto-determine\n",
    "acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine\n",
    "interlaced = False # assume interlaced data format, for data prior to Dec. 2017\n",
    "rawversion = 2 # RAW file format version\n",
    "\n",
    "thresholds_offset_sigma = 3. # offset sigma thresholds for offset deduced bad pixels\n",
    "thresholds_offset_hard = [0, 0] # For setting the same threshold offset for the 3 gains. Left for backcompatability. Default [0, 0] to take the following parameters.\n",
    "thresholds_offset_hard_hg = [3000, 7000] # High-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
    "thresholds_offset_hard_mg = [6000, 10000] # Medium-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
    "thresholds_offset_hard_lg = [6000, 10000] # Low-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
    "\n",
    "thresholds_noise_sigma = 5. # noise sigma thresholds for offset deduced bad pixels\n",
    "thresholds_noise_hard = [0, 0] # For setting the same threshold noise for the 3 gains. Left for backcompatability. Default [0, 0] to take the following parameters.\n",
    "thresholds_noise_hard_hg = [4, 20] # High-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
    "thresholds_noise_hard_mg = [4, 20] # Medium-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
    "thresholds_noise_hard_lg = [4, 20] # Low-gain thresholds in absolute ADU terms for offset deduced bad pixels\n",
    "\n",
    "thresholds_gain_sigma = 5. # Gain separation sigma threshold\n",
    "\n",
    "high_res_badpix_3d = False # set this to True if you need high-resolution 3d bad pixel plots. ~7mins extra time for 64 memory cells"
   ]
  },
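  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For orientation, the next cell is a minimal sketch (with placeholder shapes, not the real module geometry) of the layout of the per-module constants produced by this notebook."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch of the per-module constant layout (placeholder shapes only;\n",
    "# the actual pixel dimensions are taken from the raw data). The last axis\n",
    "# enumerates the three gain stages.\n",
    "import numpy as np\n",
    "\n",
    "n_x, n_y, n_cells, n_gains = 128, 128, 64, 3  # placeholder dimensions\n",
    "example_offset = np.zeros((n_x, n_y, n_cells, n_gains))  # offset map (ADU)\n",
    "example_noise = np.zeros_like(example_offset)  # noise map (ADU)\n",
    "example_badpix = np.zeros((n_x, n_y, n_cells, n_gains), np.uint32)  # bad-pixel bit mask\n",
    "# The threshold constant holds the two gain-switching levels plus the three\n",
    "# gain-bit medians, i.e. five maps per pixel and memory cell.\n",
    "example_thresholds = np.zeros((n_x, n_y, n_cells, 5))\n",
    "print(example_offset.shape, example_thresholds.shape)"
   ]
  },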
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-02-20T12:42:52.599660Z",
     "start_time": "2019-02-20T12:42:51.472138Z"
    }
   },
   "outputs": [],
   "source": [
    "# imports and things that do not usually need to be changed\n",
    "from datetime import datetime\n",
    "import dateutil.parser\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "from collections import OrderedDict\n",
    "import os\n",
    "import h5py\n",
    "import numpy as np\n",
    "import matplotlib\n",
    "import tabulate\n",
    "\n",
    "matplotlib.use('agg')\n",
    "import matplotlib.pyplot as plt\n",
    "from IPython.display import display, Markdown, Latex\n",
    "%matplotlib inline\n",
    "\n",
    "from cal_tools.tools import (get_from_db, get_dir_creation_date,\n",
    "                             get_notebook_name, get_random_db_interface,\n",
    "                             map_gain_stages, parse_runs,\n",
    "                             run_prop_seq_from_path, save_const_to_h5,\n",
    "                             send_to_db)\n",
    "from cal_tools.influx import InfluxLogger\n",
    "from cal_tools.enums import BadPixels\n",
    "from cal_tools.plotting import (create_constant_overview,\n",
    "                                plot_badpix_3d, show_processed_modules,\n",
    "                                show_overview)\n",
    "from cal_tools.agipdlib import get_gain_setting\n",
    "\n",
    "# make sure a cluster is running with ipcluster start --n=32, give it a while to start\n",
    "from ipyparallel import Client\n",
    "\n",
    "view = Client(profile=cluster_profile)[:]\n",
    "view.use_dill()\n",
    "\n",
    "from iCalibrationDB import Constants, Conditions, Detectors, Versions\n",
    "\n",
    "gains = np.arange(3)\n",
    "\n",
    "IL_MODE = interlaced\n",
    "max_cells = mem_cells\n",
    "   \n",
    "offset_runs = OrderedDict()\n",
    "offset_runs[\"high\"] = run_high\n",
    "offset_runs[\"med\"] = run_med\n",
    "offset_runs[\"low\"] = run_low\n",
    "\n",
    "creation_time=None\n",
    "if use_dir_creation_date:\n",
    "    creation_time = get_dir_creation_date(in_folder, run_high)\n",
    "\n",
    "print(f\"Using {creation_time} as creation time of constant.\")\n",
    "\n",
    "run, prop, seq = run_prop_seq_from_path(in_folder)\n",
    "\n",
    "cal_db_interface = get_random_db_interface(cal_db_interface)\n",
    "print(f'Calibration database interface: {cal_db_interface}')\n",
    "\n",
    "loc = None\n",
    "instrument = karabo_id.split(\"_\")[0]\n",
    "if instrument == \"SPB\":\n",
    "    dinstance = \"AGIPD1M1\"\n",
    "else:\n",
    "    dinstance = \"AGIPD1M2\"\n",
    "print(f\"Detector in use is {karabo_id}\")\n",
    "print(f\"Instrument {instrument}\")\n",
    "print(f\"Detector instance {dinstance}\")\n",
    "\n",
    "logger = InfluxLogger(detector=\"AGIPD\", instrument=instrument, mem_cells=mem_cells,\n",
    "                      notebook=get_notebook_name(), proposal=prop)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "gain_names = ['High', 'Medium', 'Low']\n",
    "runs = [run_high, run_med, run_low]\n",
    "\n",
    "if \"{\" in h5path_ctrl:\n",
    "    h5path_ctrl = h5path_ctrl.format(karabo_id_control)\n",
    "\n",
    "if gain_setting == 0.1:\n",
    "    if creation_time.replace(tzinfo=None) < dateutil.parser.parse('2020-01-31'):\n",
    "        print(\"Set gain-setting to None for runs taken before 2020-01-31\")\n",
    "        gain_setting = None\n",
    "    else:\n",
    "        try:\n",
    "            # extract gain setting and validate that all runs have the same setting\n",
    "            gsettings = []\n",
    "            for r in runs:\n",
    "                control_fname = '{}/r{:04d}/RAW-R{:04d}-{}-S00000.h5'.format(in_folder, r, r,\n",
    "                                                                             karabo_da_control)\n",
    "                gsettings.append(get_gain_setting(control_fname, h5path_ctrl))\n",
    "            if not all(g == gsettings[0] for g in gsettings):\n",
    "                raise ValueError(f\"Different gain settings for the 3 input runs {gsettings}\")\n",
    "            gain_setting =  gsettings[0]  \n",
    "        except Exception as e:\n",
    "            print(f'Error while reading gain setting from: \\n{control_fname}')\n",
    "            print(f'Error: {e}')\n",
    "            if \"component not found\" in str(e):\n",
    "                print(\"Gain setting is not found in the control information\")\n",
    "            print(\"Data will not be processed\")\n",
    "            sequences = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-02-20T12:42:52.608214Z",
     "start_time": "2019-02-20T12:42:52.601257Z"
    }
   },
   "outputs": [],
   "source": [
    "if karabo_da[0] == '-1':\n",
    "    if modules[0] == -1:\n",
    "        modules = list(range(16))\n",
    "    karabo_da = [\"AGIPD{:02d}\".format(i) for i in modules]\n",
    "else:\n",
    "    modules = [int(x[-2:]) for x in karabo_da]\n",
    "h5path = h5path.format(karabo_id, receiver_id)\n",
    "h5path_idx = h5path_idx.format(karabo_id, receiver_id)\n",
    "\n",
    "print(\"Parameters are:\")\n",
    "print(f\"Proposal: {prop}\")\n",
    "print(f\"Memory cells: {mem_cells}/{max_cells}\")\n",
    "print(\"Runs: {}\".format([ v for v in offset_runs.values()]))\n",
    "print(f\"Sequences: {sequences}\")\n",
    "print(f\"Interlaced mode: {IL_MODE}\")\n",
    "print(f\"Using DB: {db_output}\")\n",
    "print(f\"Input: {in_folder}\")\n",
    "print(f\"Output: {out_folder}\")\n",
    "print(f\"Bias voltage: {bias_voltage}V\")\n",
    "print(f\"Gain setting: {gain_setting}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The following lines will create a queue of files which will the be executed module-parallel. Distiguishing between different gains."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-02-20T12:42:54.024731Z",
     "start_time": "2019-02-20T12:42:53.901555Z"
    }
   },
   "outputs": [],
   "source": [
    "# set everything up filewise\n",
    "os.makedirs(out_folder, exist_ok=True)\n",
    "gmf = map_gain_stages(in_folder, offset_runs, path_template, karabo_da, sequences)\n",
    "gain_mapped_files, total_sequences, total_file_size = gmf\n",
    "print(f\"Will process a total of {total_sequences} files.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Calculate Offsets, Noise and Thresholds ##\n",
    "\n",
    "The calculation is performed per-pixel and per-memory-cell. Offsets are simply the median value for a set of dark data taken at a given gain, noise the standard deviation, and gain-bit values the medians of the gain array."
   ]
  },
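  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a minimal sketch of this calculation on synthetic data (toy shapes, not actual AGIPD geometry): frames are grouped by memory cell, and the per-cell median and standard deviation give offset and noise. The full, parallelized implementation follows in the next cell."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch of the per-pixel, per-memory-cell statistics on synthetic\n",
    "# data (toy shapes only). The real implementation below additionally handles\n",
    "# the gain-bit data, bad-pixel flagging and the interlaced format.\n",
    "import numpy as np\n",
    "\n",
    "n_x, n_y, n_frames, n_cells_toy = 8, 8, 60, 4  # toy dimensions\n",
    "toy_im = np.random.normal(5000., 10., (n_x, n_y, n_frames))\n",
    "toy_cellIds = np.arange(n_frames) % n_cells_toy  # frame -> memory cell\n",
    "\n",
    "toy_offset = np.zeros((n_x, n_y, n_cells_toy))\n",
    "toy_noise = np.zeros((n_x, n_y, n_cells_toy))\n",
    "for cc in np.unique(toy_cellIds):\n",
    "    sel = toy_cellIds == cc\n",
    "    toy_offset[..., cc] = np.median(toy_im[..., sel], axis=2)\n",
    "    toy_noise[..., cc] = np.std(toy_im[..., sel], axis=2)\n",
    "\n",
    "print(toy_offset.shape, toy_noise.shape)"
   ]
  },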
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-02-20T10:50:55.839958Z",
     "start_time": "2019-02-20T10:50:55.468134Z"
    },
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "import copy\n",
    "from functools import partial\n",
    "def characterize_module(il_mode, cells, bp_thresh, rawversion, loc, acq_rate,\n",
    "                        h5path, h5path_idx, inp):\n",
    "    import numpy as np\n",
    "    import copy\n",
    "    import h5py\n",
    "    from cal_tools.enums import BadPixels\n",
    "    from cal_tools.agipdlib import get_num_cells, get_acq_rate\n",
    "\n",
    "    filename, channel, gg = inp\n",
    "    \n",
    "    if cells == 0:\n",
    "        cells = get_num_cells(filename, loc, channel)\n",
    "\n",
    "    print(f\"Using {cells} memory cells\")\n",
    "    \n",
    "    if acq_rate == 0.:\n",
    "        acq_rate = get_acq_rate(filename, loc, channel)\n",
    "\n",
    "    thresholds_offset, thresholds_offset_sigma, thresholds_noise, thresholds_noise_sigma = bp_thresh \n",
    "    thresholds_offset_hard = thresholds_offset[gg]\n",
    "    thresholds_noise_hard = thresholds_noise[gg]\n",
    "    infile = h5py.File(filename, \"r\", driver=\"core\")\n",
    "    \n",
    "    h5path = h5path.format(channel)\n",
    "    h5path_idx = h5path_idx.format(channel)\n",
    "    \n",
    "    if rawversion == 2:\n",
    "        count = np.squeeze(infile[f\"{h5path_idx}/count\"])\n",
    "        first = np.squeeze(infile[f\"{h5path_idx}/first\"])\n",
    "        last_index = int(first[count != 0][-1]+count[count != 0][-1])\n",
    "        first_index = int(first[count != 0][0])\n",
    "    else:\n",
    "        status = np.squeeze(infile[f\"{h5path_idx}/status\"])\n",
    "        if np.count_nonzero(status != 0) == 0:\n",
    "            return\n",
    "        last = np.squeeze(infile[f\"{h5path_idx}/last\"])\n",
    "        first = np.squeeze(infile[f\"{h5path_idx}/first\"])\n",
    "        last_index = int(last[status != 0][-1]) + 1\n",
    "        first_index = int(first[status != 0][0])\n",
    "    im = np.array(infile[f\"{h5path}/data\"][first_index:last_index,...])    \n",
    "    cellIds = np.squeeze(infile[f\"{h5path}/cellId\"][first_index:last_index,...]) \n",
    "    \n",
    "    infile.close()\n",
    "\n",
    "    if il_mode:\n",
    "        ga = im[1::2, 0, ...]\n",
    "        im = im[0::2, 0, ...].astype(np.float32)\n",
    "        cellIds = cellIds[::2]\n",
    "    else:\n",
    "        ga = im[:, 1, ...]\n",
    "        im = im[:, 0, ...].astype(np.float32)\n",
    "\n",
    "    im = np.rollaxis(im, 2)\n",
    "    im = np.rollaxis(im, 2, 1)\n",
    "\n",
    "    ga = np.rollaxis(ga, 2)\n",
    "    ga = np.rollaxis(ga, 2, 1)\n",
    "\n",
    "    mcells = cells #max(cells, np.max(cellIds)+1)\n",
    "    offset = np.zeros((im.shape[0], im.shape[1], mcells))\n",
    "    gains = np.zeros((im.shape[0], im.shape[1], mcells))\n",
    "    noise = np.zeros((im.shape[0], im.shape[1], mcells))\n",
    "    gains_std = np.zeros((im.shape[0], im.shape[1], mcells))\n",
    "    \n",
    "    for cc in np.unique(cellIds[cellIds < mcells]):\n",
    "        cellidx = cellIds == cc\n",
    "        offset[...,cc] = np.median(im[..., cellidx], axis=2)\n",
    "        noise[...,cc] = np.std(im[..., cellidx], axis=2)\n",
    "        gains[...,cc] = np.median(ga[..., cellidx], axis=2)\n",
    "        gains_std[...,cc] = np.std(ga[..., cellidx], axis=2)\n",
    "\n",
    "    # bad pixels\n",
    "    bp = np.zeros(offset.shape, np.uint32)\n",
    "    # offset related bad pixels\n",
    "    offset_mn = np.nanmedian(offset, axis=(0,1))\n",
    "    offset_std = np.nanstd(offset, axis=(0,1))\n",
    "\n",
    "    bp[(offset < offset_mn-thresholds_offset_sigma*offset_std) |\n",
    "       (offset > offset_mn+thresholds_offset_sigma*offset_std)] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
    "    bp[(offset < thresholds_offset_hard[0]) | (\n",
    "        offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
    "    bp[~np.isfinite(offset)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
    "\n",
    "    # noise related bad pixels\n",
    "    noise_mn = np.nanmedian(noise, axis=(0,1))\n",
    "    noise_std = np.nanstd(noise, axis=(0,1))    \n",
    "    bp[(noise < noise_mn-thresholds_noise_sigma*noise_std) |\n",
    "       (noise > noise_mn+thresholds_noise_sigma*noise_std)] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
    "    bp[(noise < thresholds_noise_hard[0]) | (noise > thresholds_noise_hard[1])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
    "    bp[~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
    "\n",
    "    return offset, noise, gains, gains_std, gg, bp, cells, acq_rate\n",
    "\n",
    "offset_g = OrderedDict()\n",
    "noise_g = OrderedDict()\n",
    "gain_g = OrderedDict()\n",
    "gainstd_g = OrderedDict()\n",
    "badpix_g = OrderedDict()\n",
    "gg = 0\n",
    "\n",
    "start = datetime.now()\n",
    "all_cells = []\n",
    "all_acq_rate = []\n",
    "\n",
    "if thresholds_offset_hard == [0, 0]:\n",
    "    thresholds_offset_hard = [thresholds_offset_hard_hg, thresholds_offset_hard_mg, thresholds_offset_hard_lg]\n",
    "else:\n",
    "    thresholds_offset_hard = [thresholds_offset_hard] * 3\n",
    "\n",
    "if thresholds_noise_hard == [0, 0]:\n",
    "    thresholds_noise_hard = [thresholds_noise_hard_hg, thresholds_noise_hard_mg, thresholds_noise_hard_lg]\n",
    "else:\n",
    "    thresholds_noise_hard = [thresholds_noise_hard] * 3\n",
    "\n",
    "    \n",
    "inp = []\n",
    "for gain, mapped_files in gain_mapped_files.items():\n",
    "    dones = []\n",
    "    for i in modules:\n",
    "        qm = f\"Q{i//4+1}M{i%4+1}\"\n",
    "        if qm in mapped_files and not mapped_files[qm].empty():\n",
    "            fname_in = mapped_files[qm].get()\n",
    "            print(\"Process file: \", fname_in)\n",
    "            dones.append(mapped_files[qm].empty())\n",
    "        else:\n",
    "            continue\n",
    "        inp.append((fname_in, i, gg))\n",
    "        \n",
    "    gg += 1\n",
    "\n",
    "p = partial(characterize_module, IL_MODE, max_cells,\n",
    "           (thresholds_offset_hard, thresholds_offset_sigma,\n",
    "            thresholds_noise_hard, thresholds_noise_sigma),\n",
    "            rawversion, karabo_id, acq_rate, h5path, h5path_idx)\n",
    "\n",
    "# Don't remove. Used for Debugging.\n",
    "#results = list(map(p, inp))\n",
    "results = view.map_sync(p, inp)\n",
    "\n",
    "for ii, r in enumerate(results):\n",
    "    offset, noise, gains, gains_std, gg, bp, thiscell, thisacq = r\n",
    "    all_cells.append(thiscell)\n",
    "    all_acq_rate.append(thisacq)\n",
    "    for i in modules:\n",
    "        qm = f\"Q{i//4+1}M{i%4+1}\"\n",
    "        if qm not in offset_g:\n",
    "            offset_g[qm] = np.zeros((offset.shape[0], offset.shape[1], offset.shape[2], 3))\n",
    "            noise_g[qm] = np.zeros_like(offset_g[qm])\n",
    "            gain_g[qm] = np.zeros_like(offset_g[qm])\n",
    "            gainstd_g[qm] = np.zeros_like(offset_g[qm])\n",
    "            badpix_g[qm] = np.zeros_like(offset_g[qm], np.uint32)\n",
    "\n",
    "        offset_g[qm][...,gg] = offset\n",
    "        noise_g[qm][...,gg] = noise\n",
    "        gain_g[qm][...,gg] = gains\n",
    "        gainstd_g[qm][..., gg] = gains_std\n",
    "        badpix_g[qm][...,gg] = bp\n",
    "    \n",
    "\n",
    "duration = (datetime.now() - start).total_seconds()\n",
    "logger.runtime_summary_entry(success=True, runtime=duration,\n",
    "                             total_sequences=total_sequences,\n",
    "                             filesize=total_file_size)\n",
    "logger.send()\n",
    "max_cells = np.max(all_cells)\n",
    "print(f\"Using {max_cells} memory cells\")\n",
    "acq_rate = np.max(all_acq_rate)\n",
    "print(f\"Using {acq_rate} MHz acquisition rate\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add a badpixel due to bad gain separation\n",
    "for g in range(2):\n",
    "    # Bad pixels during bad gain separation.\n",
    "    # Fraction of pixels in the module with separation lower than \"thresholds_gain_sigma\".\n",
    "    bad_sep = (gain_g[qm][..., g+1] - gain_g[qm][..., g]) / np.sqrt(gainstd_g[qm][..., g+1]**2 + gainstd_g[qm][..., g]**2)\n",
    "    badpix_g[qm][...,g+1][(bad_sep)<thresholds_gain_sigma]|= BadPixels.GAIN_THRESHOLDING_ERROR.value"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The thresholds for gain switching are then defined as the mean value between in individual gain bit levels. Note that these thresholds need to be refined with charge induced thresholds, as the two are not the same."
   ]
  },
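  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a toy numerical illustration of this definition (hypothetical values, not detector data), the HG-MG threshold is the midpoint of the median gain-bit levels of the high- and medium-gain stages:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# toy illustration of the threshold definition used in the next cell\n",
    "# (hypothetical gain-bit levels, not detector data)\n",
    "hg_level, mg_level = 5000., 9000.  # hypothetical median gain-bit values (ADU)\n",
    "threshold_hg_mg = (hg_level + mg_level) / 2\n",
    "print(f\"HG-MG threshold: {threshold_hg_mg} ADU\")"
   ]
  },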
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-12-06T09:38:18.220833Z",
     "start_time": "2018-12-06T09:38:17.926616Z"
    }
   },
   "outputs": [],
   "source": [
    "thresholds_g = {}\n",
    "for qm in gain_g.keys():\n",
    "    thresholds_g[qm] = np.zeros((gain_g[qm].shape[0], gain_g[qm].shape[1], gain_g[qm].shape[2], 5))\n",
    "    thresholds_g[qm][...,0] = (gain_g[qm][...,1]+gain_g[qm][...,0])/2\n",
    "    thresholds_g[qm][...,1] = (gain_g[qm][...,2]+gain_g[qm][...,1])/2\n",
    "    for i in range(3):\n",
    "        thresholds_g[qm][...,2+i] = gain_g[qm][...,i]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-12-06T09:38:18.234582Z",
     "start_time": "2018-12-06T09:38:18.222838Z"
    }
   },
   "outputs": [],
   "source": [
    "res = OrderedDict()\n",
    "for i in modules:\n",
    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
    "    res[qm] = {'Offset': offset_g[qm],\n",
    "               'Noise': noise_g[qm],\n",
    "               'ThresholdsDark': thresholds_g[qm],\n",
    "               'BadPixelsDark': badpix_g[qm]    \n",
    "               }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
    "file_loc = 'proposal:{} runs:{} {} {}'.format(proposal, run_low, run_med, run_high)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Retrieve existing constants for comparison\n",
    "clist = [\"Offset\", \"Noise\", \"ThresholdsDark\", \"BadPixelsDark\"]\n",
    "old_const = {}\n",
    "old_mdata = {}\n",
    "detinst = getattr(Detectors, dinstance)\n",
    "\n",
    "print('Retrieve pre-existing constants for comparison.')\n",
    "\n",
    "for qm in res:\n",
    "    for const in res[qm]:\n",
    "        dconst = getattr(Constants.AGIPD, const)()\n",
    "        dconst.data = res[qm][const]\n",
    "\n",
    "        # Setting conditions\n",
    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells,\n",
    "                                          bias_voltage=bias_voltage,\n",
    "                                          acquisition_rate=acq_rate,\n",
    "                                          gain_setting=gain_setting)\n",
    "        device = getattr(detinst, qm)\n",
    "        data, mdata = get_from_db(device,\n",
    "                                  getattr(Constants.AGIPD, const)(),\n",
    "                                  condition,\n",
    "                                  None,\n",
    "                                  cal_db_interface, creation_time=creation_time,\n",
    "                                  verbosity=2, timeout=cal_db_timeout)\n",
    "\n",
    "        old_const[const] = data\n",
    "        if mdata is not None and data is not None:\n",
    "            time = mdata.calibration_constant_version.begin_at\n",
    "            old_mdata[const] = time.isoformat()\n",
    "            os.makedirs('{}/old/'.format(out_folder), exist_ok=True)\n",
    "            save_const_to_h5(device,\n",
    "                             getattr(Constants.AGIPD, const)(),\n",
    "                             condition, data, file_loc, creation_time,\n",
    "                             f'{out_folder}/old/')\n",
    "        else:\n",
    "            old_mdata[const] = \"Not found\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-12-06T09:49:32.449330Z",
     "start_time": "2018-12-06T09:49:20.231607Z"
    }
   },
   "outputs": [],
   "source": [
    "md = None\n",
    "for qm in res:\n",
    "\n",
    "    for const in res[qm]:\n",
    "        dconst = getattr(Constants.AGIPD, const)()\n",
    "        dconst.data = res[qm][const]\n",
    "\n",
    "        # set the operating condition\n",
    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells,\n",
    "                                          bias_voltage=bias_voltage,\n",
    "                                          acquisition_rate=acq_rate,\n",
    "                                          gain_setting=gain_setting)\n",
    "        detinst = getattr(Detectors, dinstance)\n",
    "        device = getattr(detinst, qm)\n",
    "\n",
    "        if db_output:\n",
    "            md = send_to_db(device, dconst, condition, file_loc, \n",
    "                            cal_db_interface, creation_time=creation_time, timeout=cal_db_timeout)\n",
    "\n",
    "        if local_output:\n",
    "            md = save_const_to_h5(device, dconst, condition, dconst.data, file_loc, creation_time, out_folder)\n",
    "            print(f\"Calibration constant {const} is stored locally.\\n\")\n",
    "            \n",
    "    print(\"Constants parameter conditions are:\\n\")\n",
    "    print(f\"• memory_cells: {max_cells}\\n• bias_voltage: {bias_voltage}\\n\"\n",
    "          f\"• acquisition_rate: {acq_rate}\\n• gain_setting: {gain_setting}\\n\"\n",
    "          f\"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "mnames=[]\n",
    "for i in modules:\n",
    "    qm = f\"Q{i//4+1}M{i % 4+1}\"\n",
    "    mnames.append(qm)\n",
    "    display(Markdown(f'## Position of the module {qm} and it\\'s ASICs##'))\n",
    "show_processed_modules(dinstance, constants=None, mnames=mnames, mode=\"position\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Single-Cell Overviews ##\n",
    "\n",
    "Single cell overviews allow to identify potential effects on all memory cells, e.g. on sensor level. Additionally, they should serve as a first sanity check on expected behaviour, e.g. if structuring on the ASIC level is visible in the offsets, but otherwise no immediate artifacts are visible."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### High Gain ###"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2018-12-06T09:49:14.540552Z",
     "start_time": "2018-12-06T09:49:13.009674Z"
    },
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "cell = 3\n",
    "gain = 0\n",
    "show_overview(res, cell, gain, infix=\"{}-{}-{}\".format(*offset_runs.values()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Medium Gain ###"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "cell = 3\n",
    "gain = 1\n",
    "show_overview(res, cell, gain, infix=\"{}-{}-{}\".format(*offset_runs.values()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Low Gain ###"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "cell = 3\n",
    "gain = 2\n",
    "show_overview(res, cell, gain, infix=\"{}-{}-{}\".format(*offset_runs.values()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "cols = {BadPixels.NOISE_OUT_OF_THRESHOLD.value: (BadPixels.NOISE_OUT_OF_THRESHOLD.name, '#FF000080'),\n",
    "        BadPixels.OFFSET_NOISE_EVAL_ERROR.value: (BadPixels.OFFSET_NOISE_EVAL_ERROR.name, '#0000FF80'),\n",
    "        BadPixels.OFFSET_OUT_OF_THRESHOLD.value: (BadPixels.OFFSET_OUT_OF_THRESHOLD.name, '#00FF0080'),\n",
    "        BadPixels.GAIN_THRESHOLDING_ERROR.value: (BadPixels.GAIN_THRESHOLDING_ERROR.name, '#FF40FF40'),\n",
    "        BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value: ('OFFSET_OUT_OF_THRESHOLD + NOISE_OUT_OF_THRESHOLD', '#DD00DD80'),\n",
    "        BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value | \n",
    "        BadPixels.GAIN_THRESHOLDING_ERROR.value: ('MIXED', '#BFDF009F')}\n",
    "\n",
    "if high_res_badpix_3d:\n",
    "    display(Markdown(\"\"\"\n",
    "    \n",
    "    ## Global Bad Pixel Behaviour ##\n",
    "\n",
    "    The following plots show the results of bad pixel evaluation for all evaluated memory cells. \n",
    "    Cells are stacked in the Z-dimension, while pixels values in x/y are rebinned with a factor of 2. \n",
    "    This excludes single bad pixels present only in disconnected pixels. \n",
    "    Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated. \n",
    "    Colors encode the bad pixel type, or mixed type.\n",
    "\n",
    "    \"\"\"))\n",
    "\n",
    "    gnames = ['High Gain', 'Medium Gain', 'Low Gain']\n",
    "    for gain in range(3):\n",
    "        display(Markdown(f'### {gnames[gain]} ###'))\n",
    "        for mod, data in badpix_g.items():\n",
    "            plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=1)\n",
    "            plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Aggregate values, and per Cell behaviour ##\n",
    "\n",
    "The following tables and plots give an overview of statistical aggregates for each constant, as well as per cell behavior."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "create_constant_overview(offset_g, \"Offset (ADU)\", max_cells, 4000, 8000,\n",
    "                         badpixels=[badpix_g, np.nan])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "create_constant_overview(noise_g, \"Noise (ADU)\", max_cells, 0, 100,\n",
    "                         badpixels=[badpix_g, np.nan])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Plot only three gain threshold maps.\n",
    "bp_thresh = OrderedDict()\n",
    "for mod, con in badpix_g.items():\n",
    "    bp_thresh[mod] = np.zeros((con.shape[0], con.shape[1], con.shape[2], 5), dtype=con.dtype)\n",
    "    bp_thresh[mod][...,:2] = con[...,:2]\n",
    "    bp_thresh[mod][...,2:] = con\n",
    "\n",
    "\n",
    "create_constant_overview(thresholds_g, \"Threshold (ADU)\", max_cells, 4000, 10000, 5,\n",
    "                         badpixels=[bp_thresh, np.nan],\n",
    "                         gmap=['HG-MG Threshold', 'MG-LG Threshold', 'High gain', 'Medium gain', 'low gain'],\n",
    "                         marker=['d','d','','','']\n",
    "                         )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "bad_pixel_aggregate_g = OrderedDict()\n",
    "for m, d in badpix_g.items():\n",
    "    bad_pixel_aggregate_g[m] = d.astype(np.bool).astype(np.float)\n",
    "create_constant_overview(bad_pixel_aggregate_g, \"Bad pixel fraction\", max_cells, 0, 0.10, 3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Summary tables ##\n",
    "\n",
    "The following tables show summary information for the evaluated module. Values for currently evaluated constants are compared with values for pre-existing constants retrieved from the calibration database."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "table = []\n",
    "gain_names = ['High', 'Medium', 'Low']\n",
    "bits = [BadPixels.NOISE_OUT_OF_THRESHOLD, BadPixels.OFFSET_OUT_OF_THRESHOLD, BadPixels.OFFSET_NOISE_EVAL_ERROR, BadPixels.GAIN_THRESHOLDING_ERROR]\n",
    "for qm in badpix_g.keys():\n",
    "    for gain in range(3):\n",
    "\n",
    "        l_data = []\n",
    "        l_data_old = []\n",
    "\n",
    "        data = np.copy(badpix_g[qm][:,:,:,gain])\n",
    "        datau32 = data.astype(np.uint32)\n",
    "        l_data.append(len(datau32[datau32>0].flatten()))\n",
    "        for bit in bits:\n",
    "            l_data.append(np.count_nonzero(badpix_g[qm][:,:,:,gain] & bit.value))\n",
    "\n",
    "        if old_const['BadPixelsDark'] is not None:\n",
    "            dataold = np.copy(old_const['BadPixelsDark'][:, :, :, gain])\n",
    "            datau32old = dataold.astype(np.uint32)\n",
    "            l_data_old.append(len(datau32old[datau32old>0].flatten()))\n",
    "            for bit in bits:\n",
    "                l_data_old.append(np.count_nonzero(old_const['BadPixelsDark'][:, :, :, gain] & bit.value))\n",
    "\n",
    "        l_data_name = ['All bad pixels', 'NOISE_OUT_OF_THRESHOLD', \n",
    "                       'OFFSET_OUT_OF_THRESHOLD', 'OFFSET_NOISE_EVAL_ERROR', 'GAIN_THRESHOLDING_ERROR']\n",
    "\n",
    "        l_threshold = ['', f'{thresholds_noise_sigma}' f'{thresholds_noise_hard[gain]}', \n",
    "                       f'{thresholds_offset_sigma}' f'{thresholds_offset_hard[gain]}',\n",
    "                       '', f'{thresholds_gain_sigma}']\n",
    "\n",
    "        for i in range(len(l_data)):\n",
    "            line = [f'{l_data_name[i]}, {gain_names[gain]} gain', l_threshold[i], l_data[i]]\n",
    "\n",
    "            if old_const['BadPixelsDark'] is not None:\n",
    "                line += [l_data_old[i]]\n",
    "            else:\n",
    "                line += ['-']\n",
    "\n",
    "            table.append(line)\n",
    "        table.append(['', '', '', ''])\n",
    "\n",
    "display(Markdown('''\n",
    "\n",
    "### Number of bad pixels ###\n",
    "\n",
    "One pixel can be bad for different reasons, therefore, the sum of all types of bad pixels can be more than the number of all bad pixels.\n",
    "\n",
    "'''))\n",
    "if len(table)>0:\n",
    "    md = display(Latex(tabulate.tabulate(table, tablefmt='latex', \n",
    "                                         headers=[\"Pixel type\", \"Threshold\", \n",
    "                                                  \"New constant\", \"Old constant\"])))  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "header = ['Parameter', \n",
    "          \"New constant\", \"Old constant \", \n",
    "          \"New constant\", \"Old constant \", \n",
    "          \"New constant\", \"Old constant \",\n",
    "          \"New constant\", \"Old constant \"]\n",
    "\n",
    "for const in ['Offset', 'Noise', 'ThresholdsDark']:\n",
    "    if const != 'ThresholdsDark':\n",
    "        table = [['','High gain', 'High gain', 'Medium gain', 'Medium gain', 'Low gain', 'Low gain']]\n",
    "    else:\n",
    "        table = [['','HG-MG threshold', 'HG-MG threshold', 'MG-LG threshold', 'MG-LG threshold']]\n",
    "    for qm in res.keys():\n",
    "\n",
    "        data = np.copy(res[qm][const])\n",
    "        if const == 'ThresholdsDark':\n",
    "            data[...,0][res[qm]['BadPixelsDark'][...,0]>0] = np.nan\n",
    "            data[...,1][res[qm]['BadPixelsDark'][...,1]>0] = np.nan\n",
    "        else:\n",
    "            data[res[qm]['BadPixelsDark']>0] = np.nan\n",
    "\n",
    "        if old_const[const] is not None and old_const['BadPixelsDark'] is not None:\n",
    "            dataold = np.copy(old_const[const])\n",
    "            if const == 'ThresholdsDark':\n",
    "                dataold[...,0][old_const['BadPixelsDark'][...,0]>0] = np.nan\n",
    "                dataold[...,1][old_const['BadPixelsDark'][...,1]>0] = np.nan\n",
    "            else:\n",
    "                dataold[old_const['BadPixelsDark']>0] = np.nan\n",
    "\n",
    "        f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]\n",
    "        n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']\n",
    "\n",
    "        for i, f in enumerate(f_list):\n",
    "            line = [n_list[i]]\n",
    "            for gain in range(3):\n",
    "                # Compare only 3 threshold gain-maps\n",
    "                if gain == 2 and const == 'ThresholdsDark':\n",
    "                    continue\n",
    "                line.append('{:6.1f}'.format(f(data[...,gain])))\n",
    "                if old_const[const] is not None and old_const['BadPixelsDark'] is not None:\n",
    "                    line.append('{:6.1f}'.format(f(dataold[...,gain])))\n",
    "                else:\n",
    "                    line.append('-')\n",
    "\n",
    "            table.append(line)\n",
    "\n",
    "    display(Markdown('### {} [ADU], good pixels only ###'.format(const)))\n",
    "    md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}