From 4ee24cf036c95ab7250a7b2a5e3dff358116e47b Mon Sep 17 00:00:00 2001
From: Steffen Hauf <haufs@max-exfl056.desy.de>
Date: Fri, 17 Nov 2017 14:18:42 +0100
Subject: [PATCH] Add flat field eval

---
 .../Characterize_AGIPD_Gain_FlatFields.ipynb  | 702 ++++++++++++++++++
 1 file changed, 702 insertions(+)
 create mode 100644 AGIPD/Characterize_AGIPD_Gain_FlatFields.ipynb

diff --git a/AGIPD/Characterize_AGIPD_Gain_FlatFields.ipynb b/AGIPD/Characterize_AGIPD_Gain_FlatFields.ipynb
new file mode 100644
index 000000000..745c96f9d
--- /dev/null
+++ b/AGIPD/Characterize_AGIPD_Gain_FlatFields.ipynb
@@ -0,0 +1,702 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Gain Characterization (Flat Fields) #\n",
+    "\n",
+    "The following code characterizes the gain of the AGIPD detector from flat field data, i.e. data with X-rays of defined intensity. This data should fullfil the following requirements:\n",
+    "\n",
+    "* intensity should be such that single photon peaks are visible\n",
+    "* data for all modules should be present\n",
+    "* no shadowing should occur on any of the modules\n",
+    "* each pixel should have at minimum arround 100 events per photon peak per memory cell\n",
+    "* if central beam edges are visible, they should not be significantly more intense\n",
+    "\n",
+    "Characterization is done by a weighted average algorithm, which evaluates the peak locations for all pixels\n",
+    "and memory cells of a given module. These locations are then fitted to a linear function of the average peak\n",
+    "location in each module, such that it yield a relative gain correction."
+   ]
+  },
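+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal numpy sketch of this weighted-average idea, using hypothetical array names and toy shapes (the actual computation in this notebook is performed by pyDetLib's `GainMapCalculator` further below):\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "\n",
+    "# hypothetical fitted photon-peak positions (ADU), shape (x, y, cells, peaks)\n",
+    "peak_locations = np.random.normal(55.0, 2.0, (16, 16, 4, 3)) * np.arange(1, 4)\n",
+    "\n",
+    "# module-average location of each peak order\n",
+    "module_mean = np.nanmean(peak_locations, axis=(0, 1, 2))\n",
+    "\n",
+    "# fit each pixel/cell as a linear function of the module-average locations:\n",
+    "# peak_pixel ~ slope * peak_module + offset\n",
+    "y = peak_locations.reshape(-1, module_mean.size)\n",
+    "slope, offset = np.polyfit(module_mean, y.T, 1)\n",
+    "\n",
+    "# the slope acts as the relative gain correction per pixel and memory cell\n",
+    "rel_gain = slope.reshape(peak_locations.shape[:-1])\n",
+    "```"
+   ]
+  },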
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Disabled GPU usage after pyCuda import failed!\n",
+      "Using Cython were available\n"
+     ]
+    }
+   ],
+   "source": [
+    "# std library imports\n",
+    "from functools import partial\n",
+    "import h5py\n",
+    "import os\n",
+    "\n",
+    "# numpy and matplot lib specific\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline\n",
+    "\n",
+    "# parallel processing via ipcluster\n",
+    "# make sure a cluster is running with ipcluster start --n=32, give it a while to start\n",
+    "from ipyparallel import Client\n",
+    "client = Client(profile=\"noDB\")\n",
+    "view = client[:]\n",
+    "view.use_dill()\n",
+    "\n",
+    "# pyDetLib imports\n",
+    "import XFELDetAna.xfelpycaltools as xcal\n",
+    "import XFELDetAna.xfelpyanatools as xana\n",
+    "from XFELDetAna.util import env\n",
+    "env.iprofile = \"noDB\"\n",
+    "\n",
+    "# usually no need to change these lines\n",
+    "sensor_size = [128, 512]\n",
+    "block_size = [64, 64]\n",
+    "QUADRANTS = 4\n",
+    "MODULES_PER_QUAD = 4\n",
+    "DET_FILE_INSET = \"AGIPD\"\n",
+    "IL_MODE = False # or True for interlaced data\n",
+    "\n",
+    "\n",
+    "# the following lines should be adjusted depending on data\n",
+    "in_folder = \"/gpfs/exfel/exp/SPB/201701/p002038/raw/\"\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/haufs/gain_Data\"\n",
+    "runs = [\"r0051\", \"r0052\", \"r0053\"] # the runs the flat field data is in\n",
+    "\n",
+    "# change this to the offsets that shoudl be used\n",
+    "offset_store = \"/gpfs/exfel/exp/SPB/201701/p002038/proc/calibration/dark/agipd_offset_store_r0036_r0022_r0023.h5\"\n",
+    "\n",
+    "# cells in raw data\n",
+    "max_cells = 74\n",
+    "# actual memory cells: max_cells//2 if AGIPD is in interleaved mode\n",
+    "memory_cells = 74\n",
+    "# sequences to take data from\n",
+    "sequences = range(4)\n",
+    "# modules to characterize\n",
+    "modules = range(8,16)\n",
+    "\n",
+    "# these lines can usually stay as is\n",
+    "fbase = \"{}/{{}}/RAW-{{}}-AGIPD{{:02d}}-S{{:05d}}.h5\".format(in_folder)\n",
+    "gains = np.arange(3)\n",
+    "cells = np.arange(max_cells)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For the characterization offset maps for each module are needed. In the following these are read in"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "\n",
+    "store_file = h5py.File(offset_store, \"r\")\n",
+    "offset_g = {}\n",
+    "noise_g = {}\n",
+    "thresholds_g = {}\n",
+    "for i in modules:\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    offset_g[qm] = np.array(store_file[\"{}/Offset/0/data\".format(qm)])\n",
+    "    noise_g[qm] = np.array(store_file[\"{}/Noise/0/data\".format(qm)])\n",
+    "    thresholds_g[qm] = np.array(store_file[\"{}/Threshold/0/data\".format(qm)])\n",
+    "store_file.close()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Initial peak estimates ##\n",
+    "\n",
+    "The following parallel code will read in the flat field runs, offset correct them and then, bin data of each\n",
+    "module into histograms.\n",
+    "\n",
+    "These histograms should then be inspected for initial peak estimates for the single, double, ... photon peaks,\n",
+    "as well es estimates of the relative hights of these peaks to one and another."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "def hist_single_module(fbase, runs, sequences, sensor_size, memory_cells, block_size,\n",
+    "                       il_mode, inp):\n",
+    "    \"\"\" This function calculates a per-pixel histogram for a single module\n",
+    "    \n",
+    "    Runs and sequences give the data to calculate histogram from\n",
+    "    \"\"\"\n",
+    "    channel, offset, thresholds = inp\n",
+    "    \n",
+    "    import XFELDetAna.xfelpycaltools as xcal\n",
+    "    import numpy as np\n",
+    "    import h5py\n",
+    "    from XFELDetAna.util import env\n",
+    "    env.iprofile = \"noDB\"\n",
+    "\n",
+    "    \n",
+    "    # function needs to be inline for parallell processing\n",
+    "    def read_fun(filename, channel):\n",
+    "        \"\"\" A reader function used by pyDetLib\n",
+    "        \"\"\"\n",
+    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
+    "        status = np.squeeze(infile[\"/INDEX/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(channel)])\n",
+    "        last = np.squeeze(infile[\"/INDEX/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(channel)])\n",
+    "        last_index = int(last[status != 0][-1])\n",
+    "        im = np.array(infile[\"/INSTRUMENT/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(channel)][:last_index+1,...])    \n",
+    "        carr = infile[\"/INSTRUMENT/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(channel)][:last_index+1]\n",
+    "        cells = np.squeeze(np.array(carr))\n",
+    "        infile.close()\n",
+    "    \n",
+    "        if il_mode:\n",
+    "            ga = im[1::2, 0, ...]\n",
+    "            im = im[0::2, 0, ...].astype(np.float32)\n",
+    "        else:\n",
+    "            ga = im[:, 1, ...]\n",
+    "            im = im[:, 0, ...].astype(np.float32)\n",
+    "\n",
+    "        im = np.rollaxis(im, 2)\n",
+    "        im = np.rollaxis(im, 2, 1)\n",
+    "\n",
+    "        ga = np.rollaxis(ga, 2)\n",
+    "        ga = np.rollaxis(ga, 2, 1)\n",
+    "        return im, ga, cells\n",
+    "    \n",
+    "    offset_cor = xcal.OffsetCorrection(sensor_size,\n",
+    "                                       offset,\n",
+    "                                       nCells=memory_cells,\n",
+    "                                       blockSize=block_size,\n",
+    "                                       gains=[0,1,2])\n",
+    "    offset_cor.mapper = offset_cor._view.map_sync\n",
+    "    offset_cor.debug() # force non-parallel processing since outer function will run concurrently\n",
+    "    hist_calc = xcal.HistogramCalculator(sensor_size,\n",
+    "                                         bins=4000,\n",
+    "                                         range=(-4000, 8000),\n",
+    "                                         blockSize=block_size)\n",
+    "    hist_calc.mapper = hist_calc._view.map_sync\n",
+    "    hist_calc.debug() # force non-parallel processing since outer function will run concurrently\n",
+    "    for run in runs:\n",
+    "        for seq in sequences:\n",
+    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
+    "            d, ga, c = read_fun(fname, channel)\n",
+    "            # we need to do proper gain thresholding\n",
+    "            g = np.zeros(ga.shape, np.uint8)\n",
+    "            g[...] = 2\n",
+    "            for cc in range(g.shape[2]//memory_cells):\n",
+    "                tga = ga[...,cc*memory_cells:(cc+1)*memory_cells]\n",
+    "                tg = g[...,cc*memory_cells:(cc+1)*memory_cells]\n",
+    "                tg[tga < thresholds[...,1]] = 1\n",
+    "                tg[tga < thresholds[...,0]] = 0\n",
+    "                g[...,cc*memory_cells:(cc+1)*memory_cells] = tg\n",
+    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
+    "            hist_calc.fill(d)\n",
+    "    h, e, c, _ = hist_calc.get()\n",
+    "    return h, e, c\n",
+    "\n",
+    "inp = []\n",
+    "for i in modules:\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    inp.append((i, offset_g[qm], thresholds_g[qm]))\n",
+    "    \n",
+    "p = partial(hist_single_module, fbase, runs, sequences,\n",
+    "            sensor_size, memory_cells, block_size, IL_MODE)\n",
+    "res_uncorr = view.map_sync(p, inp)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We inspect the resulting histograms for the estimates. Modules should look roughly the same as no significant deviation is to be expected. Non-function modules and artifacts of single pixels may be visible in the histogram."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "d = []\n",
+    "for i, r in enumerate(res_uncorr):\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    h, e, c = r \n",
+    "    d.append({\n",
+    "            'x': c, \n",
+    "            'y': h,\n",
+    "            'drawstyle': 'steps-mid'\n",
+    "        })\n",
+    "    \n",
+    "fig = xana.simplePlot(d, y_log=True,\n",
+    "                      figsize=\"2col\",\n",
+    "                      aspect=2,\n",
+    "                      x_range=(0, 200),\n",
+    "                      x_label=\"Intensity (ADU)\",\n",
+    "                      y_label=\"Counts\")"
+   ]
+  },
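+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The estimates in the next cell are read off the histograms above by eye. As a rough aid they could also be extracted automatically; the following is only a sketch, assuming `res_uncorr[0]` holds the summed histogram and bin centres of one module and using `scipy.signal.find_peaks`, which is not otherwise part of this notebook:\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "from scipy.signal import find_peaks\n",
+    "\n",
+    "h, e, c = res_uncorr[0]\n",
+    "h = np.asarray(h, dtype=float).squeeze()\n",
+    "\n",
+    "# peaks must stand out against the background and be a few bins apart\n",
+    "idx, _ = find_peaks(h, prominence=0.01 * h.max(), distance=10)\n",
+    "\n",
+    "print(\"peak positions (ADU):\", c[idx])\n",
+    "print(\"peak heights (counts):\", h[idx])\n",
+    "```"
+   ]
+  },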
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# these should be quite stable\n",
+    "peak_estimates = [0, 55, 110, 165, 220]\n",
+    "peak_heights = [5e7, 5e6, 1e6, 5e5, 1e5]\n",
+    "peak_sigma = [5., 5., 5., 5., 5.]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "## Calculate relative gain per module ##\n",
+    "\n",
+    "Using the so obtained estimates, we calculate the relative gain per module. For this we use the weighted average method implemented in pyDetLib.\n",
+    "\n",
+    "Since for current AGIPD data taking only every second memory cells sees X-rays we account for this. For technical reasons, the subsequent cell will have the same constants copied, which are irrelevant as not X-rays are to be expected in these cells."
+   ]
+  },
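+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of this memory-cell bookkeeping with hypothetical arrays: constants are derived on the illuminated (every second) cells only and are then copied to both cells of each pair, so that the stored constants cover all memory cells:\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "\n",
+    "memory_cells = 74\n",
+    "# constants derived on the illuminated half of the cells, e.g. from d[..., ::2]\n",
+    "gains_half = np.ones((128, 512, memory_cells // 2))\n",
+    "\n",
+    "# copy each constant to both cells of a pair\n",
+    "gain_db = np.zeros((128, 512, memory_cells))\n",
+    "for j in range(2):\n",
+    "    gain_db[..., j::2] = gains_half\n",
+    "```"
+   ]
+  },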
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "def relgain_single_module(fbase, runs, sequences, peak_estimates,\n",
+    "                          peak_heights, peak_sigma, memory_cells, sensor_size,\n",
+    "                          block_size, il_mode, inp):\n",
+    "    \"\"\" A function for calculated the relative gain of a single AGIPD module\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    # import needed inline for parallel processing\n",
+    "    import XFELDetAna.xfelpycaltools as xcal\n",
+    "    import numpy as np\n",
+    "    import h5py\n",
+    "    from XFELDetAna.util import env\n",
+    "    env.iprofile = \"noDB\"\n",
+    "    \n",
+    "    channel, offset, thresholds, noise = inp\n",
+    "    \n",
+    "    # function needs to be inline for parallell processing\n",
+    "    def read_fun(filename, channel):\n",
+    "        \"\"\" A reader function used by pyDetLib\n",
+    "        \"\"\"\n",
+    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
+    "        status = np.squeeze(infile[\"/INDEX/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(channel)])\n",
+    "        last = np.squeeze(infile[\"/INDEX/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(channel)])\n",
+    "        last_index = int(last[status != 0][-1])\n",
+    "        im = np.array(infile[\"/INSTRUMENT/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(channel)][:last_index+1,...])    \n",
+    "        carr = infile[\"/INSTRUMENT/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(channel)][:last_index+1]\n",
+    "        cells = np.squeeze(np.array(carr))\n",
+    "        infile.close()\n",
+    "\n",
+    "        if il_mode:\n",
+    "            ga = im[1::2, 0, ...]\n",
+    "            im = im[0::2, 0, ...].astype(np.float32)\n",
+    "        else:\n",
+    "            ga = im[:, 1, ...]\n",
+    "            im = im[:, 0, ...].astype(np.float32)\n",
+    "            \n",
+    "        im = np.rollaxis(im, 2)\n",
+    "        im = np.rollaxis(im, 2, 1)\n",
+    "\n",
+    "        ga = np.rollaxis(ga, 2)\n",
+    "        ga = np.rollaxis(ga, 2, 1)\n",
+    "        return im, ga, cells\n",
+    "    \n",
+    "    offset_cor = xcal.OffsetCorrection(sensor_size, offset, nCells=memory_cells,\n",
+    "                                       blockSize=block_size, gains=[0,1,2])\n",
+    "    offset_cor.mapper = offset_cor._view.map_sync\n",
+    "\n",
+    "    rel_gain = xcal.GainMapCalculator(sensor_size,\n",
+    "                                      peak_estimates,\n",
+    "                                      peak_sigma,\n",
+    "                                      nCells=memory_cells//2,\n",
+    "                                      peakHeights = peak_heights,\n",
+    "                                      noiseMap=noise[...,::2],\n",
+    "                                      deviationType=\"relative\")\n",
+    "    rel_gain.mapper = rel_gain._view.map_sync\n",
+    "    for run in runs:\n",
+    "        for seq in sequences:\n",
+    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
+    "            d, ga, c = read_fun(fname, channel)\n",
+    "            # we need to do proper gain thresholding\n",
+    "            g = np.zeros(ga.shape, np.uint8)\n",
+    "            g[...] = 2\n",
+    "            for cc in range(g.shape[2]//memory_cells):\n",
+    "                tga = ga[...,cc*memory_cells:(cc+1)*memory_cells]\n",
+    "                tg = g[...,cc*memory_cells:(cc+1)*memory_cells]\n",
+    "                tg[tga < thresholds[...,1]] = 1\n",
+    "                tg[tga < thresholds[...,0]] = 0\n",
+    "                g[...,cc*memory_cells:(cc+1)*memory_cells] = tg\n",
+    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
+    "            rel_gain.fill(d[...,::2])\n",
+    "    \n",
+    "    gain_map = rel_gain.get()\n",
+    "    gain_map_func = rel_gain.getUsingFunc(inverse=False)\n",
+    "    pks, stds = rel_gain.getRawPeaks()\n",
+    "    return gain_map, pks, stds, gain_map_func\n",
+    "\n",
+    "inp = []\n",
+    "for i in modules:\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    inp.append((i, offset_g[qm], thresholds_g[qm], noise_g[qm][...,0]))\n",
+    "    \n",
+    "p = partial(relgain_single_module, fbase, runs, sequences,\n",
+    "            peak_estimates, peak_heights, peak_sigma, memory_cells,\n",
+    "            sensor_size, block_size, IL_MODE)\n",
+    "res_gain = list(map(p, inp)) # don't run concurently as inner function are parelllized"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "Finally, we inspect the results, by plotting the number of entries, peak locations and resulting gain maps for each peak. In the course of doing so, we identify bad pixels by either having 0 entries for a peak, or having `nan` values for the peak location."
+   ]
+  },
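+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of this flagging logic on hypothetical arrays (the evaluation below additionally sets a third bit for ADC blocks with noisy entries):\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "\n",
+    "pks = np.random.normal(55.0, 2.0, (16, 16, 4, 5))               # peak locations\n",
+    "entries = np.random.poisson(100, (16, 16, 4, 5)).astype(float)  # entries per peak\n",
+    "\n",
+    "mask = np.zeros(pks.shape[:-1], np.uint8)\n",
+    "mask[~np.isfinite(pks[..., 1])] |= 1   # bit 1: first photon peak location is nan/inf\n",
+    "mask[entries[..., 1] == 0] |= 2        # bit 2: no entries in the first photon peak\n",
+    "```"
+   ]
+  },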
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "from mpl_toolkits.axes_grid1 import AxesGrid\n",
+    "\n",
+    "\n",
+    "gain_m = {}\n",
+    "flatsff = {}\n",
+    "gainoff_g = {}\n",
+    "entries_g = {}\n",
+    "mask_g = {}\n",
+    "cell_to_preview = 12\n",
+    "masks_eval_peaks = [1, 2]\n",
+    "global_eval_peaks = [1]\n",
+    "global_meds = {}\n",
+    "\n",
+    "for i, r in enumerate(res_gain):\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    gain, pks, std, gfunc = r\n",
+    "    gains, errors, chisq, valid, max_dev, stats = gfunc\n",
+    "    _, entries, stds, sow = gain\n",
+    "    gain_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
+    "    gain_mdb = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
+    "    entries_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells, 5))    \n",
+    "    gainoff_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
+    "    mask_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells), np.uint8)\n",
+    "    \n",
+    "    # take care we have data for all memory cells\n",
+    "    for j in range(2):\n",
+    "        gain_mdb[...,j::2] = gains[...,0]        \n",
+    "        gainoff_db[...,j::2] = gains[...,1]\n",
+    "        entries_db[...,j::2,:] = entries[...]\n",
+    "\n",
+    "    gainoff_g[qm] = gainoff_db\n",
+    "    gain_m[qm] = gain_mdb\n",
+    "    entries_g[qm] = entries_db\n",
+    "    \n",
+    "    # create a mask for unregular pixels\n",
+    "    # first bit set if first peak has nan entries\n",
+    "    for pk in masks_eval_peaks:\n",
+    "        mask_db[(~np.isfinite(pks[...,pk])) | (np.abs(1-pks[...,pk]/np.nanmedian(pks[...,pk]) > 0.8) )] += 1\n",
+    "    # second bit set if entries are 0 for first peak\n",
+    "    mask_db[entries[...,1] == 0] += 2\n",
+    "    # third bit set if entries of a given adc show significant noise\n",
+    "    stds = []\n",
+    "    for ii in range(8):\n",
+    "        for jj in range(8):\n",
+    "            stds.append(np.std(entries_db[ii*16:(ii+1)*16,jj*64+2:(jj+1)*64-2,:,1], axis=(0,1)))\n",
+    "    avg_stds = np.median(np.array(stds), axis=0)\n",
+    "    \n",
+    "    for ii in range(8):\n",
+    "        for jj in range(8):\n",
+    "            std = np.std(entries_db[ii*16:(ii+1)*16,jj*64+2:(jj+1)*64-2,:,1], axis=(0,1))\n",
+    "            if np.any(std > 10*avg_stds):\n",
+    "                mask_db[ii*16:(ii+1)*16,jj*64:(jj+1)*64,std > avg_stds] +=4\n",
+    "                \n",
+    "    mask_g[qm] = mask_db\n",
+    "    \n",
+    "    flat = np.zeros((gains.shape[0], gains.shape[1], memory_cells//2, 3))\n",
+    "    for j in range(2,5):\n",
+    "        flat[...,j-2] = np.mean(entries[...,j]/np.mean(entries[...,j]))\n",
+    "    flat = np.mean(flat, axis=3)\n",
+    "    flat_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
+    "    for j in range(2):\n",
+    "        flat_db[...,j::2] = flat\n",
+    "    flatsff[qm] = flat_db\n",
+    "    \n",
+    "    global_meds[qm] = np.nanmedian(pks[...,global_eval_peaks][np.max(mask_db, axis=2) != 0])\n",
+    "        \n",
+    "    \n",
+    "    fig = plt.figure(figsize=(10,10))\n",
+    "    grid = AxesGrid(fig, 111,  \n",
+    "                    nrows_ncols=(5, 2),\n",
+    "                    axes_pad=0.0,\n",
+    "                    share_all=True,\n",
+    "                    label_mode=\"L\",\n",
+    "                    cbar_location=\"top\",\n",
+    "                    cbar_mode=\"each\",\n",
+    "                    cbar_size=\"7%\",\n",
+    "                    cbar_pad=\"2%\",\n",
+    "                    )\n",
+    "\n",
+    "    for j in range(5):\n",
+    "        im = grid[2*j].imshow(entries[...,cell_to_preview,j],\n",
+    "                              interpolation=\"nearest\", vmin=0, vmax=500)\n",
+    "        grid.cbar_axes[2*j].colorbar(im)\n",
+    "        im = grid[2*j+1].imshow(pks[...,cell_to_preview,j], interpolation=\"nearest\", vmin=0, vmax=400)\n",
+    "        grid.cbar_axes[2*j+1].colorbar(im)\n",
+    "    fig.savefig(\"{}/entries_peaks_mod{:02d}.png\".format(out_folder, i))\n",
+    "        \n",
+    "    fig = plt.figure(figsize=(10,5))\n",
+    "    ax = fig.add_subplot(111)\n",
+    "    im = ax.imshow(gains[...,cell_to_preview,0], interpolation=\"nearest\", vmin=0.95, vmax=1.05)\n",
+    "    fig.colorbar(im)\n",
+    "    fig.savefig(\"{}/gain_m_mod{:02d}.png\".format(out_folder, i))\n",
+    "    \n",
+    "    fig = plt.figure(figsize=(10,5))\n",
+    "    ax = fig.add_subplot(111)\n",
+    "    im = ax.imshow(gains[...,cell_to_preview,1], interpolation=\"nearest\", vmin=-2, vmax=2)\n",
+    "    fig.colorbar(im)\n",
+    "    fig.savefig(\"{}/gain_b_mod{:02d}.png\".format(out_folder, i))\n",
+    "    \n",
+    "    fig = plt.figure(figsize=(10,5))\n",
+    "    ax = fig.add_subplot(111)\n",
+    "    im = ax.imshow(mask_db[...,cell_to_preview], interpolation=\"nearest\")\n",
+    "    fig.colorbar(im)\n",
+    "    fig.savefig(\"{}/mask_mod{:02d}.png\".format(out_folder, i))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# normalize accross modules\n",
+    "global_med = np.median(np.array(list(global_meds.values())))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Here we save the relevant constants"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ofile = \"{}/agipd_gain_store_{}_8_2.h5\".format(out_folder, \"_\".join(runs))\n",
+    "store_file = h5py.File(ofile, \"w\")\n",
+    "for i, r in enumerate(res_gain):\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    gain, pks, std, gfunc = r\n",
+    "    gains, errors, chisq, valid, max_dev, stats = gfunc\n",
+    "    gainmap, entires, stds, sow = gain\n",
+    "    store_file[\"/{}/Gain/0/data\".format(qm)] = gain_m[qm]*(global_med/global_meds[qm])\n",
+    "    store_file[\"/{}/GainOffset/0/data\".format(qm)] = gainoff_g[qm]    \n",
+    "    store_file[\"/{}/Flat/0/data\".format(qm)] = flatsff[qm]\n",
+    "    store_file[\"/{}/Entries/0/data\".format(qm)] = entries_g[qm]  \n",
+    "    store_file[\"/{}/BadPixels/0/data\".format(qm)] = mask_g[qm]  \n",
+    "store_file.close()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Sanity check ##\n",
+    "\n",
+    "Finally, we perform a correction of the data used to derive the gain constants with said constants. We expect that the histograms of all modules now align."
+   ]
+  },
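+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Per memory-cell block, the correction applied below is `corrected = (offset_corrected - gain_offset) / relative_gain`; a minimal sketch with hypothetical arrays:\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "\n",
+    "# hypothetical offset-corrected data and gain constants for one module\n",
+    "offset_corrected = np.random.normal(60.0, 5.0, (128, 512, 74))\n",
+    "rel_gain = np.full((128, 512, 74), 1.0)\n",
+    "gain_offset = np.zeros((128, 512, 74))\n",
+    "\n",
+    "corrected = (offset_corrected - gain_offset) / rel_gain\n",
+    "```"
+   ]
+  },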
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "sequences = [0]\n",
+    "\n",
+    "def hist_single_module(fbase, runs, sequences, il_mode, inp):\n",
+    "    channel, offset, thresholds, relgain, relgainoff = inp\n",
+    "    \n",
+    "    import XFELDetAna.xfelpycaltools as xcal\n",
+    "    import numpy as np\n",
+    "    import h5py\n",
+    "    import copy\n",
+    "    from XFELDetAna.util import env\n",
+    "    env.iprofile = \"noDBA14\"\n",
+    "    sensor_size = [128, 512]\n",
+    "    memory_cells = 74\n",
+    "    block_size = [64, 64]\n",
+    "    def read_fun(filename, channel):\n",
+    "        \n",
+    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
+    "        status = np.squeeze(infile[\"/INDEX/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(channel)])\n",
+    "        last = np.squeeze(infile[\"/INDEX/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(channel)])\n",
+    "        last_index = int(last[status != 0][-1])\n",
+    "        im = np.array(infile[\"/INSTRUMENT/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(channel)][:last_index+1,...])    \n",
+    "        carr = infile[\"/INSTRUMENT/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(channel)][:last_index+1]\n",
+    "        cells = np.squeeze(np.array(carr))\n",
+    "        infile.close()\n",
+    "\n",
+    "        if il_mode:\n",
+    "            ga = im[1::2, 0, ...]\n",
+    "            im = im[0::2, 0, ...].astype(np.float32)\n",
+    "        else:\n",
+    "            ga = im[:, 1, ...]\n",
+    "            im = im[:, 0, ...].astype(np.float32)\n",
+    "        im = np.rollaxis(im, 2)\n",
+    "        im = np.rollaxis(im, 2, 1)\n",
+    "\n",
+    "        ga = np.rollaxis(ga, 2)\n",
+    "        ga = np.rollaxis(ga, 2, 1)\n",
+    "        return im, ga, cells\n",
+    "    \n",
+    "    offset_cor = xcal.OffsetCorrection(sensor_size, offset, nCells=memory_cells, blockSize=block_size, gains=[0,1,2])\n",
+    "    offset_cor.debug()\n",
+    "    \n",
+    "    hist_calc = xcal.HistogramCalculator(sensor_size, bins=2000, range=(0, 2000), blockSize=block_size)\n",
+    "    hist_calc.debug()\n",
+    "\n",
+    "    hist_calc_uncorr = xcal.HistogramCalculator(sensor_size, bins=2000, range=(0, 2000), blockSize=block_size)\n",
+    "    hist_calc_uncorr.debug()\n",
+    "    \n",
+    "    \n",
+    "    for run in runs:\n",
+    "        for seq in sequences:\n",
+    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
+    "            d, ga, c = read_fun(fname, channel)\n",
+    "            g = np.zeros(ga.shape, np.uint8)\n",
+    "            g[...] = 2\n",
+    "            for cc in range(g.shape[2]//memory_cells):\n",
+    "                tga = ga[...,cc*memory_cells:(cc+1)*memory_cells]\n",
+    "                tg = g[...,cc*memory_cells:(cc+1)*memory_cells]\n",
+    "                tg[tga < thresholds[...,1]] = 1\n",
+    "                tg[tga < thresholds[...,0]] = 0\n",
+    "                g[...,cc*memory_cells:(cc+1)*memory_cells] = tg\n",
+    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
+    "    \n",
+    "            hist_calc_uncorr.fill(d)\n",
+    "            for cc in range(g.shape[2]//memory_cells):\n",
+    "                td = d[...,cc*memory_cells:(cc+1)*memory_cells]\n",
+    "                td = (td - relgainoff)/relgain\n",
+    "                d[...,cc*memory_cells:(cc+1)*memory_cells] = td\n",
+    "            hist_calc.fill(d)    \n",
+    "    \n",
+    "    h, e, c, _ = hist_calc.get()\n",
+    "    hu  = hist_calc_uncorr.get()\n",
+    "    return h, e, c, hu[0]\n",
+    "\n",
+    "inp = []\n",
+    "for i in modules:\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    \n",
+    "    inp.append((i, offset_g[qm], thresholds_g[qm], gain_m[qm]/(global_med/global_meds[qm]), gainoff_g[qm]))\n",
+    "    \n",
+    "p = partial(hist_single_module, fbase, runs, sequences, IL_MODE)\n",
+    "res = view.map_sync(p, inp)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "d = []\n",
+    "for i, r in enumerate(res):\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    \n",
+    "    h, e, c, hu = r \n",
+    "    d.append({\n",
+    "            'x': c, \n",
+    "            'y': h,\n",
+    "            'drawstyle': 'steps-mid'\n",
+    "        })\n",
+    "    \n",
+    "    \n",
+    "fig = xana.simplePlot(d, y_log=True, figsize=\"2col\", aspect=2, x_range=(0, 400))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.4.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
-- 
GitLab