diff --git a/cal_tools/cal_tools/tools.py b/cal_tools/cal_tools/tools.py
index f4f7f0939443f4d1a505636c160794090e993ba7..cfea5e7a8b973ac043cfd0fb5db080f55fc3d170 100644
--- a/cal_tools/cal_tools/tools.py
+++ b/cal_tools/cal_tools/tools.py
@@ -64,7 +64,7 @@ def combine_report(run_path, calibration):
                 with open("{}/{}.rst".format(sphinx_path, group),
                           "a") as gfile:
                     if conc_param != "None":
-                        title = "{}. {} = {}".format(calibration, name_param,
+                        title = "{}, {} = {}".format(calibration, name_param,
                                                      conc_param)
                         gfile.write(title + "\n")
                         gfile.write("=" * len(title) + "\n")
@@ -160,7 +160,7 @@ def make_timing_summary(run_path, joblist):
                 break
 
     tmpl = Template('''
-                    Timing summary
+                    Runtime summary
-                    ==============
+                    ===============
                     
                     .. math::
diff --git a/notebooks/LPD/LPDChar_Darks_NBC.ipynb b/notebooks/LPD/LPDChar_Darks_NBC.ipynb
index f6fff191a548c89c3b889d0dca4bc7f35a6656e0..93a53fddc8f7499c0dc15f9e2017525e6dc1116a 100644
--- a/notebooks/LPD/LPDChar_Darks_NBC.ipynb
+++ b/notebooks/LPD/LPDChar_Darks_NBC.ipynb
@@ -4,13 +4,49 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Offset Characterization #\n",
+    "# Offset, Noise and Dead Pixels Characterization #\n",
     "\n",
-    "Author: S. Hauf, Version: 0.1\n",
+    "Author: M. Karnevskiy, S. Hauf\n",
     "\n",
-    "This notebook allows you to recharacterize dark images to get a new offset map. It will correctly handle veto settings, but note that if you veto cells you will not be able to use these offsets for runs with different veto settings - vetoed cells will have zero offset.\n",
+    "This notebook performs re-characterize of dark images to derive offset, noise and bad-pixel maps. All three types of constants are evaluated per-pixel and per-memory cell.\n",
     "\n",
-    "Usually you will only need to alter the cell directly below this comment."
+    "The notebook will correctly handle veto settings, but note that if you veto cells you will not be able to use these offsets for runs with different veto settings - vetoed cells will have zero offset.\n",
+    "\n",
+    "The evaluated calibration constants are stored locally and injected in the calibration data base.\n",
+    "\n",
+    "**The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$). \n",
+    "\n",
+    "**The noise** $N$ is the standard deviation $\\sigma$ of the dark signal.\n",
+    "\n",
+    "$$ O_{x,y,c} = M(Ds)_{t} ,\\,\\,\\,\\,\\,\\, N_{x,y,c} = \\sigma(Ds)_{t}$$\n",
+    "\n",
+    "**The bad pixel** mask is encoded as a bit mask.\n",
+    "\n",
+    "**\"OFFSET_OUT_OF_THRESHOLD\":**\n",
+    "\n",
+    "Offset outside of bounds:\n",
+    "\n",
+    "$$M(O)_{x,y} - \\sigma(O)_{x,y} * \\mathrm{thresholds\\_offset\\_sigma} < O < M(O)_{x,y} + \\sigma(O)_{x,y} * \\mathrm{thresholds\\_offset\\_sigma} $$\n",
+    "\n",
+    "or offset outside of hard limits\n",
+    "\n",
+    "$$ \\mathrm{thresholds\\_offset\\_hard}_\\mathrm{low} < O < \\mathrm{thresholds\\_offset\\_hard}_\\mathrm{high} $$\n",
+    "\n",
+    "**\"NOISE_OUT_OF_THRESHOLD\":**\n",
+    "\n",
+    "Noise outside of bounds:\n",
+    "\n",
+    "$$M(N)_{x,y} - \\sigma(N)_{x,y} * \\mathrm{thresholds\\_noise\\_sigma} < N < M(N)_{x,y} + \\sigma(N)_{x,y} * \\mathrm{thresholds\\_noise\\_sigma} $$\n",
+    "\n",
+    "or noise outside of hard limits\n",
+    "\n",
+    "$$\\mathrm{thresholds\\_noise\\_hard}_\\mathrm{low} < N < \\mathrm{thresholds\\_noise\\_hard}_\\mathrm{high} $$\n",
+    "\n",
+    "**\"OFFSET_NOISE_EVAL_ERROR\":**\n",
+    "\n",
+    "Offset and Noise both not $nan$ values \n",
+    "\n",
+    "Values: $\\mathrm{thresholds\\_offset\\_sigma}$, $\\mathrm{thresholds\\_offset\\_hard}$, $\\mathrm{thresholds\\_noise\\_sigma}$, $\\mathrm{thresholds\\_noise\\_hard}$ are given as parameters."
    ]
   },
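+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of how the constants are derived for a single memory cell and how the bit mask can be queried (illustration only; the parallelized implementation used by this notebook is in the cells below). The `dark` array is a hypothetical stand-in for the dark signal of one cell over trains:\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "from cal_tools.enums import BadPixels\n",
+    "\n",
+    "thresholds_offset_sigma = 3.\n",
+    "thresholds_offset_hard = [400, 1500]\n",
+    "\n",
+    "# hypothetical dark signal of one memory cell: 256 x 256 pixels over 100 trains\n",
+    "dark = np.random.normal(1000., 5., size=(256, 256, 100))\n",
+    "offset = np.median(dark, axis=2)  # O per pixel\n",
+    "noise = np.std(dark, axis=2)      # N per pixel\n",
+    "\n",
+    "# bad-pixel bit mask: flags are combined with bitwise OR\n",
+    "bp = np.zeros(offset.shape, np.uint32)\n",
+    "mn, std = np.nanmedian(offset), np.nanstd(offset)\n",
+    "bp[(offset < mn - thresholds_offset_sigma * std) |\n",
+    "   (offset > mn + thresholds_offset_sigma * std)] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
+    "bp[(offset < thresholds_offset_hard[0]) |\n",
+    "   (offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
+    "bp[~np.isfinite(offset) | ~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
+    "\n",
+    "# query a single flag with a bitwise AND\n",
+    "n_bad = np.count_nonzero(bp & BadPixels.OFFSET_OUT_OF_THRESHOLD.value)\n",
+    "```"
+   ]
+  },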
   {
@@ -22,31 +58,31 @@
    "outputs": [],
    "source": [
     "cluster_profile = \"noDB\" # The ipcluster profile to use\n",
-    "in_folder = \"/gpfs/exfel/exp/FXE/201701/p900020/raw/\" # path to input data, required\n",
-    "out_folder = \"/gpfs/exfel/exp/FXE/201830/p900020/proc/calibration/dark/\" # path to output to, required\n",
-    "sequences = [0] # sequence files to evaluate.\n",
-    "modules = [-1] # modules to evaluate, range allowed\n",
+    "in_folder = \"/gpfs/exfel/exp/FXE/201930/p900063/raw\" # path to input data, required\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/karnem/LPD/\" # path to output to, required\n",
+    "sequences = [0] # sequence files to evaluate\n",
+    "modules = [-1] # list of modules to evaluate, RANGE ALLOWED\n",
     "\n",
     "capacitor_setting = 5 # capacitor_setting for which data was taken, required\n",
-    "run_high = 603 # run number in which high gain data was recorded, required\n",
-    "run_med = 604 # run number in which medium gain data was recorded, required\n",
-    "run_low = 605 # run number in which low gain data was recorded, required\n",
+    "run_high = 358 # run number in which high gain data was recorded, required\n",
+    "run_med = 359 # run number in which medium gain data was recorded, required\n",
+    "run_low = 360 # run number in which low gain data was recorded, required\n",
     "\n",
-    "mem_cells = 128 # number of memory cells used\n",
+    "mem_cells = 512 # number of memory cells used\n",
     "local_output = True # output constants locally\n",
     "db_output = True # output constants to database\n",
     "bias_voltage = 250 # detector bias voltage\n",
     "cal_db_interface = \"tcp://max-exfl016:8017\" # the database interface to use\n",
     "\n",
-    "thresholds_offset_sigma = 3.\n",
-    "thresholds_offset_hard = [400, 1500]\n",
-    "thresholds_noise_sigma = 7.\n",
-    "thresholds_noise_hard = [1, 35]\n",
-    "skip_first_ntrains = 10\n",
-    "use_dir_creation_date = True # use the creation date of the directory for database time derivation\n",
-    "instrument = \"FXE\"\n",
-    "ntrains = 300\n",
-    "high_res_badpix_3d = False"
+    "thresholds_offset_sigma = 3. # bad pixel relative threshold in terms of n sigma offset\n",
+    "thresholds_offset_hard = [400, 1500] # bad pixel hard threshold\n",
+    "thresholds_noise_sigma = 7. # bad pixel relative threshold in terms of n sigma noise\n",
+    "thresholds_noise_hard = [1, 35] # bad pixel hard threshold\n",
+    "skip_first_ntrains = 10 # Number of first trains to skip\n",
+    "use_dir_creation_date = False # use the creation date of the directory for database time derivation\n",
+    "instrument = \"FXE\" # instrument name\n",
+    "ntrains = 10 # number of trains to use\n",
+    "high_res_badpix_3d = False # plot bad-pixel summary in high resolution"
    ]
   },
   {
@@ -57,37 +93,60 @@
    },
    "outputs": [],
    "source": [
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
     "from collections import OrderedDict\n",
+    "import copy\n",
     "from datetime import datetime\n",
+    "from functools import partial\n",
     "import os\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "import dateutil.parser\n",
     "import h5py\n",
+    "from ipyparallel import Client\n",
+    "from IPython.display import display, Markdown, Latex\n",
     "import numpy as np\n",
     "import matplotlib\n",
     "matplotlib.use(\"agg\")\n",
+    "import matplotlib.patches as patches\n",
     "import matplotlib.pyplot as plt\n",
     "%matplotlib inline\n",
-    "from ipyparallel import Client\n",
+    "import tabulate\n",
+    "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
+    "from XFELDetAna.plotting.simpleplot import simplePlot\n",
     "\n",
-    "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
-    "from cal_tools.tools import gain_map_files, parse_runs, run_prop_seq_from_path, get_notebook_name, get_dir_creation_date\n",
+    "from iCalibrationDB import (ConstantMetaData, Constants, \n",
+    "                            Conditions, Detectors, \n",
+    "                            Versions)\n",
+    "from cal_tools.tools import (gain_map_files, parse_runs, \n",
+    "                             run_prop_seq_from_path, \n",
+    "                             get_notebook_name, \n",
+    "                             get_dir_creation_date, get_from_db)\n",
     "from cal_tools.influx import InfluxLogger\n",
     "from cal_tools.enums import BadPixels\n",
-    "from cal_tools.plotting import show_overview, plot_badpix_3d, create_constant_overview\n",
-    "\n",
+    "from cal_tools.plotting import (show_overview, plot_badpix_3d, \n",
+    "                                create_constant_overview)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
     "client = Client(profile=cluster_profile)\n",
     "view = client[:]\n",
     "view.use_dill()\n",
-    "\n",
     "gains = np.arange(3)\n",
     "max_cells = mem_cells\n",
     "cells = np.arange(max_cells)\n",
+    "gain_names = ['High', 'Medium', 'Low']\n",
     "\n",
     "if modules[0] == -1:\n",
     "    modules = list(range(16))\n",
     "\n",
-    "from collections import OrderedDict\n",
     "gain_runs = OrderedDict()\n",
     "if capacitor_setting == 5:\n",
     "    gain_runs[\"high_5pf\"] = \"r{:04d}\".format(run_high)\n",
@@ -98,7 +157,6 @@
     "    gain_runs[\"med_50pf\"] =  \"r{:04d}\".format(run_med)\n",
     "    gain_runs[\"low_50pf\"] =  \"r{:04d}\".format(run_low)\n",
     "\n",
-    "\n",
     "capacitor_settings = [capacitor_setting]\n",
     "capacitor_settings = ['{}pf'.format(c) for c in capacitor_settings]\n",
     "\n",
@@ -115,7 +173,7 @@
     "logger = InfluxLogger(detector=\"LPD\", instrument=instrument, mem_cells=mem_cells,\n",
     "                      notebook=get_notebook_name(), proposal=prop)\n",
     "    \n",
-    "print(\"Parameters are:\")\n",
+    "display(Markdown('## Evaluated parameters'))\n",
     "print(\"Proposal: {}\".format(prop))\n",
     "print(\"Memory cells: {}/{}\".format(mem_cells, max_cells))\n",
     "print(\"Runs: {}, {}, {}\".format(run_high, run_med, run_low))\n",
@@ -139,28 +197,32 @@
     "    os.makedirs(out_folder)\n",
     "\n",
     "gmf = gain_map_files(in_folder, gain_runs, sequences, DET_FILE_INSET, QUADRANTS, MODULES_PER_QUAD)\n",
-    "gain_mapped_files, total_sequences, total_file_size = gmf\n",
-    "\n",
-    "print(\"Will process at total of {} sequences: {:0.2f} GB of data.\".format(total_sequences, total_file_size))"
+    "gain_mapped_files, total_sequences, total_file_size = gmf"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Data processing"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
     "# the actual characterization - to not eded this without consultation\n",
-    "import copy\n",
-    "from functools import partial\n",
     "def characterize_module(cells, bp_thresh, skip_first_ntrains, ntrains, inp):\n",
     "    import numpy as np\n",
     "    import copy\n",
     "    import h5py\n",
     "    from cal_tools.enums import BadPixels\n",
-    "    \n",
+    "    import scipy.stats\n",
+    "\n",
     "    def splitOffGainLPD(d):\n",
     "        msk = np.zeros(d.shape, np.uint16)\n",
     "        msk[...] = 0b0000111111111111\n",
@@ -169,13 +231,15 @@
     "        gain = np.bitwise_and(d, msk)//4096\n",
     "        gain[gain > 2] = 2\n",
     "        return data, gain\n",
-    "    \n",
+    "\n",
     "    filename, filename_out, channel = inp\n",
-    "    thresholds_offset_hard, thresholds_offset_sigma, thresholds_noise_hard, thresholds_noise_sigma = bp_thresh \n",
+    "    thresholds_offset_hard, thresholds_offset_sigma, thresholds_noise_hard, thresholds_noise_sigma = bp_thresh\n",
     "\n",
     "    infile = h5py.File(filename, \"r\", driver=\"core\")\n",
-    "    im = np.array(infile[\"/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/data\".format(channel)][skip_first_ntrains*cells:skip_first_ntrains*cells+ntrains*cells,...])\n",
-    "    cellid = np.squeeze(np.array(infile[\"/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(channel)][skip_first_ntrains*cells:skip_first_ntrains*cells+ntrains*cells,...]))\n",
+    "    im = np.array(infile[\"/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/data\".format(\n",
+    "        channel)][skip_first_ntrains*cells:skip_first_ntrains*cells+ntrains*cells, ...])\n",
+    "    cellid = np.squeeze(np.array(infile[\"/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(\n",
+    "        channel)][skip_first_ntrains*cells:skip_first_ntrains*cells+ntrains*cells, ...]))\n",
     "    infile.close()\n",
     "\n",
     "    im, g = splitOffGainLPD(im[:, 0, ...])\n",
@@ -183,42 +247,50 @@
     "\n",
     "    im = np.rollaxis(im, 2)\n",
     "    im = np.rollaxis(im, 2, 1)\n",
-    "    \n",
     "\n",
     "    offset = np.zeros((im.shape[0], im.shape[1], cells))\n",
     "    noise = np.zeros((im.shape[0], im.shape[1], cells))\n",
+    "    normal_test = np.zeros((im.shape[0], im.shape[1], cells))\n",
     "    for cc in range(cells):\n",
     "        idx = cellid == cc\n",
     "        if np.any(idx):\n",
-    "            \n",
-    "            offset[...,cc] = np.median(im[:,:, idx], axis=2)\n",
-    "            noise[...,cc] = np.std(im[:,:,idx], axis=2)\n",
-    "            \n",
+    "\n",
+    "            offset[..., cc] = np.median(im[:, :, idx], axis=2)\n",
+    "            noise[..., cc] = np.std(im[:, :, idx], axis=2)\n",
+    "            _, normal_test[..., cc] = scipy.stats.normaltest(\n",
+    "                im[:, :, idx], axis=2)\n",
+    "\n",
     "    # bad pixels\n",
     "    bp = np.zeros(offset.shape, np.uint32)\n",
     "    # offset related bad pixels\n",
-    "    offset_mn = np.nanmedian(offset, axis=(0,1))\n",
-    "    offset_std = np.nanstd(offset, axis=(0,1))    \n",
-    "    \n",
+    "    offset_mn = np.nanmedian(offset, axis=(0, 1))\n",
+    "    offset_std = np.nanstd(offset, axis=(0, 1))\n",
+    "\n",
     "    bp[(offset < offset_mn-thresholds_offset_sigma*offset_std) |\n",
     "       (offset > offset_mn+thresholds_offset_sigma*offset_std)] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
-    "    bp[(offset < thresholds_offset_hard[0]) | (offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
+    "    bp[(offset < thresholds_offset_hard[0]) | (\n",
+    "        offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
     "    bp[~np.isfinite(offset)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
-    "    \n",
+    "\n",
     "    # noise related bad pixels\n",
-    "    noise_mn = np.nanmedian(noise, axis=(0,1))\n",
-    "    noise_std = np.nanstd(noise, axis=(0,1))    \n",
-    "    \n",
+    "    noise_mn = np.nanmedian(noise, axis=(0, 1))\n",
+    "    noise_std = np.nanstd(noise, axis=(0, 1))\n",
+    "\n",
     "    bp[(noise < noise_mn-thresholds_noise_sigma*noise_std) |\n",
     "       (noise > noise_mn+thresholds_noise_sigma*noise_std)] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
-    "    bp[(noise < thresholds_noise_hard[0]) | (noise > thresholds_noise_hard[1])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
+    "    bp[(noise < thresholds_noise_hard[0]) | (\n",
+    "        noise > thresholds_noise_hard[1])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
     "    bp[~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n",
-    "        \n",
-    "    return offset, noise, channel, bp\n",
-    "        \n",
+    "\n",
+    "    idx = cellid == 12\n",
+    "    return offset, noise, channel, bp, im[12, 12, idx], normal_test\n",
+    "\n",
+    "\n",
     "offset_g = OrderedDict()\n",
     "noise_g = OrderedDict()\n",
     "badpix_g = OrderedDict()\n",
+    "data_g = OrderedDict()\n",
+    "ntest_g = OrderedDict()\n",
     "\n",
     "gg = 0\n",
     "old_cap = None\n",
@@ -231,38 +303,53 @@
     "        offset_g[cap] = OrderedDict()\n",
     "        noise_g[cap] = OrderedDict()\n",
     "        badpix_g[cap] = OrderedDict()\n",
-    "    \n",
-    "    dones = []    \n",
+    "        data_g[cap] = OrderedDict()\n",
+    "        ntest_g[cap] = OrderedDict()\n",
+    "\n",
+    "    dones = []\n",
     "    inp = []\n",
-    "    \n",
+    "\n",
     "    for i in modules:\n",
-    "        qm = \"Q{}M{}\".format(i//4 +1, i % 4 + 1)    \n",
+    "        qm = \"Q{}M{}\".format(i//4 + 1, i % 4 + 1)\n",
     "        if qm in mapped_files and not mapped_files[qm].empty():\n",
-    "            fname_in = mapped_files[qm].get()            \n",
+    "            fname_in = mapped_files[qm].get()\n",
     "            dones.append(mapped_files[qm].empty())\n",
-    "            \n",
+    "\n",
     "        else:\n",
     "            continue\n",
-    "        fout = os.path.abspath(\"{}/{}\".format(out_folder, (os.path.split(fname_in)[-1]).replace(\"RAW\", \"CORR\")))\n",
+    "        fout = os.path.abspath(\n",
+    "            \"{}/{}\".format(out_folder, (os.path.split(fname_in)[-1]).replace(\"RAW\", \"CORR\")))\n",
+    "        print(\"Process file: \", fout)\n",
     "        inp.append((fname_in, fout, i))\n",
     "    first = False\n",
     "    p = partial(characterize_module, max_cells,\n",
-    "               (thresholds_offset_hard, thresholds_offset_sigma,\n",
-    "                thresholds_noise_hard, thresholds_noise_sigma),\n",
+    "                (thresholds_offset_hard, thresholds_offset_sigma,\n",
+    "                 thresholds_noise_hard, thresholds_noise_sigma),\n",
     "                skip_first_ntrains, ntrains)\n",
     "    results = view.map_sync(p, inp)\n",
     "    for r in results:\n",
-    "        offset, noise, i, bp= r\n",
-    "        qm = \"Q{}M{}\".format(i//4 +1, i % 4 + 1)\n",
+    "        offset, noise, i, bp, data, normal = r\n",
+    "        qm = \"Q{}M{}\".format(i//4 + 1, i % 4 + 1)\n",
     "        if qm not in offset_g[cap]:\n",
-    "            offset_g[cap][qm] = np.zeros((offset.shape[0], offset.shape[1], offset.shape[2], 3))\n",
+    "            offset_g[cap][qm] = np.zeros(\n",
+    "                (offset.shape[0], offset.shape[1], offset.shape[2], 3))\n",
     "            noise_g[cap][qm] = np.zeros_like(offset_g[cap][qm])\n",
     "            badpix_g[cap][qm] = np.zeros_like(offset_g[cap][qm])\n",
-    "        offset_g[cap][qm][...,gg] = offset\n",
-    "        noise_g[cap][qm][...,gg] = noise        \n",
-    "        badpix_g[cap][qm][...,gg] = bp        \n",
-    "    gg +=1\n",
-    "    \n",
+    "            data_g[cap][qm] = np.zeros((data.shape[0], 3))\n",
+    "            ntest_g[cap][qm] = np.zeros_like(offset_g[cap][qm])\n",
+    "\n",
+    "        offset_g[cap][qm][..., gg] = offset\n",
+    "        noise_g[cap][qm][..., gg] = noise\n",
+    "        badpix_g[cap][qm][..., gg] = bp\n",
+    "        data_g[cap][qm][..., gg] = data\n",
+    "        ntest_g[cap][qm][..., gg] = normal\n",
+    "\n",
+    "        hn, cn = np.histogram(data, bins=20)\n",
+    "        print(\"{} gain. Module: {}. Number of processed trains per cell: {}.\\n\".format(\n",
+    "            gain_names[gg], qm, data.shape[0]))\n",
+    "    gg += 1\n",
+    "    plt.show()\n",
+    "\n",
     "duration = (datetime.now()-start).total_seconds()\n",
     "logger.runtime_summary_entry(success=True, runtime=duration,\n",
     "                             total_sequences=total_sequences,\n",
@@ -274,7 +361,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
@@ -282,98 +369,62 @@
     "if local_output:\n",
     "    for cap in capacitor_settings:\n",
     "        runs = [v for k, v in gain_runs.items() if cap in k]\n",
-    "        ofile = \"{}/lpd_offset_store_{}_{}_{}.h5\".format(out_folder, \"_\".join(runs), cap, \"_\".join([str(m) for m in modules]))\n",
+    "        ofile = \"{}/lpd_offset_store_{}_{}_{}.h5\".format(out_folder, \n",
+    "                                                         \"_\".join(runs), \n",
+    "                                                         cap, \n",
+    "                                                         \"_\".join([str(m) for m in modules]))\n",
     "        store_file = h5py.File(ofile, \"w\")\n",
     "        for qm in offset_g[cap].keys():\n",
     "            store_file[\"{}/Offset/0/data\".format(qm)] = offset_g[cap][qm]\n",
     "            store_file[\"{}/Noise/0/data\".format(qm)] = noise_g[cap][qm]\n",
-    "            store_file[\"{}/BadPixelsDark/0/data\".format(qm)] = badpix_g[cap][qm]\n",
-    "        store_file.close()\n"
+    "            store_file[\"{}/BadPixelsDark/0/data\".format(qm)\n",
+    "                       ] = badpix_g[cap][qm]\n",
+    "        store_file.close()\n",
+    "        print('Constants are stored to {}'.format(ofile))"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
-    "if db_output:\n",
-    "    for cap in capacitor_settings:\n",
-    "        for qm in offset_g[cap].keys():\n",
-    "            metadata = ConstantMetaData()\n",
-    "            offset = Constants.LPD.Offset()\n",
-    "            offset.data = offset_g[cap][qm]\n",
-    "            metadata.calibration_constant = offset\n",
-    "\n",
-    "            # set the operating condition\n",
-    "            condition = Conditions.Dark.LPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
-    "                                            capacitor=cap)\n",
-    "            device = getattr(Detectors.LPD1M1, qm)\n",
-    "            \n",
-    "            if device:\n",
-    "\n",
-    "                metadata.detector_condition = condition\n",
-    "\n",
-    "                # specify the a version for this constant\n",
-    "                if creation_time is None:\n",
-    "                    metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "                else:\n",
-    "                    metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
-    "                                                                              start=creation_time)\n",
-    "                metadata.send(cal_db_interface, timeout=3000000)\n",
-    "                \n",
-    "            metadata = ConstantMetaData()\n",
-    "            noise = Constants.LPD.Noise()\n",
-    "            noise.data = noise_g[cap][qm]\n",
-    "            metadata.calibration_constant = noise\n",
+    "# Retrieve existing constants for comparison\n",
+    "clist = [\"Offset\", \"Noise\", \"BadPixelsDark\"]\n",
+    "old_const = {}\n",
+    "old_mdata = {}\n",
+    "creation_time = dateutil.parser.parse(\"2019-02-14\")\n",
+    "print('Retrieve pre-existing constants for comparison.')\n",
+    "for cap in capacitor_settings:\n",
+    "    for qm in offset_g[cap].keys():\n",
+    "        for const in clist:\n",
     "\n",
-    "            # set the operating condition\n",
     "            condition = Conditions.Dark.LPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
     "                                            capacitor=cap)\n",
-    "            device = getattr(Detectors.LPD1M1, qm)\n",
-    "            \n",
-    "            if device:\n",
     "\n",
-    "                metadata.detector_condition = condition\n",
-    "\n",
-    "                # specify the a version for this constant\n",
-    "                if creation_time is None:\n",
-    "                    metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "                else:\n",
-    "                    metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
-    "                                                                              start=creation_time)\n",
-    "                metadata.send(cal_db_interface, timeout=3000000)\n",
-    "        \n",
-    "            metadata = ConstantMetaData()\n",
-    "            badpixels = Constants.LPD.BadPixelsDark()\n",
-    "            badpixels.data = badpix_g[cap][qm]\n",
-    "            metadata.calibration_constant = badpixels\n",
-    "\n",
-    "            # set the operating condition\n",
-    "            condition = Conditions.Dark.LPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
-    "                                            capacitor=cap)\n",
-    "            device = getattr(Detectors.LPD1M1, qm)\n",
-    "            \n",
-    "            if device:\n",
+    "            data, mdata = get_from_db(getattr(Detectors.LPD1M1, qm),\n",
+    "                                      getattr(Constants.LPD, const)(),\n",
+    "                                      condition,\n",
+    "                                      None,\n",
+    "                                      cal_db_interface, creation_time=creation_time,\n",
+    "                                      verbosity=2, timeout=30000)\n",
     "\n",
-    "                metadata.detector_condition = condition\n",
+    "            old_const[const] = data\n",
     "\n",
-    "                # specify the a version for this constant\n",
-    "                if creation_time is None:\n",
-    "                    metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "                else:\n",
-    "                    metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
-    "                                                                              start=creation_time)\n",
-    "                metadata.send(cal_db_interface, timeout=3000000)"
+    "            if mdata is not None and data is not None:\n",
+    "                time = mdata.calibration_constant_version.begin_at\n",
+    "                old_mdata[const] = time.isoformat()\n",
+    "            else:\n",
+    "                old_mdata[const] = \"Not found\""
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
@@ -381,72 +432,291 @@
     "for cap in capacitor_settings:\n",
     "    res[cap] = OrderedDict()\n",
     "    for i in modules:\n",
-    "        qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "        qm = \"Q{}M{}\".format(i//4+1, i % 4+1)\n",
     "\n",
     "        res[cap][qm] = {'Offset': offset_g[cap][qm],\n",
-    "                   'Noise': noise_g[cap][qm],\n",
-    "                   'BadPixels': badpix_g[cap][qm]    \n",
-    "                   }\n"
+    "                        'Noise': noise_g[cap][qm],\n",
+    "                        'BadPixelsDark': badpix_g[cap][qm]\n",
+    "                        }"
    ]
   },
   {
-   "cell_type": "markdown",
-   "metadata": {},
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
    "source": [
-    "## Single-Cell Overviews ##\n",
+    "# Save constants in the calibration DB\n",
+    "if db_output:\n",
+    "    for cap in capacitor_settings:\n",
+    "        for qm in res[cap]:\n",
+    "            for const in res[cap][qm]:\n",
+    "\n",
+    "                metadata = ConstantMetaData()\n",
+    "                dconst = getattr(Constants.LPD, const)()\n",
+    "                dconst.data = res[cap][qm][const]\n",
+    "                metadata.calibration_constant = dconst\n",
+    "\n",
+    "                # set the operating condition\n",
+    "                condition = Conditions.Dark.LPD(memory_cells=max_cells,\n",
+    "                                                bias_voltage=bias_voltage,\n",
+    "                                                capacitor=cap)\n",
+    "                device = getattr(Detectors.LPD1M1, qm)\n",
+    "                if device:\n",
     "\n",
-    "Single cell overviews allow to identify potential effects on all memory cells, e.g. on sensor level. Additionally, they should serve as a first sanity check on expected behaviour, e.g. if structuring on the ASIC level is visible in the offsets, but otherwise no immediate artifacts are visible."
+    "                    metadata.detector_condition = condition\n",
+    "\n",
+    "                    # specify the a version for this constant\n",
+    "                    if creation_time is None:\n",
+    "                        metadata.calibration_constant_version = Versions.Now(device=device)\n",
+    "                    else:\n",
+    "                        metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
+    "                                                                                  start=creation_time)\n",
+    "                    metadata.send(cal_db_interface)\n",
+    "                    msg = 'Const {} for module {} was injected to the calibration DB. Begin at: {}'\n",
+    "                    print(msg.format(const, qm,\n",
+    "                                     metadata.calibration_constant_version.begin_at))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "qm = \"Q{}M{}\".format(modules[0]//4+1, modules[0] % 4+1)\n",
+    "display(Markdown('## Position of the module {}, it tiles and ASICs of tile ##'.format(qm)))\n",
+    "\n",
+    "fig, ax = plt.subplots(1, figsize=(10, 10))\n",
+    "ax.set_axis_off()\n",
+    "\n",
+    "ax.set_xlim(0, 97)\n",
+    "ax.set_ylim(0, 97)\n",
+    "\n",
+    "q_poses = np.array([[51, 47], [47, 1], [1, 5], [5, 51]])\n",
+    "m_poses = np.array([[22.5, 20.5], [22.5, 0.5], [0.5, 0.5], [0.5, 20.5]])\n",
+    "\n",
+    "for iq, q_pos in enumerate(q_poses):\n",
+    "    ax.add_patch(patches.Rectangle(q_pos, 45, 45, linewidth=2, edgecolor='r',\n",
+    "                                   facecolor='y', fill=True))\n",
+    "\n",
+    "    ax.text(q_pos[0]+20, q_pos[1]+41.5, 'Q{}'.format(iq+1), fontsize=22)\n",
+    "    for im, m_pos in enumerate(m_poses):\n",
+    "        ax.add_patch(patches.Rectangle(q_pos+m_pos, 22, 20, linewidth=3, edgecolor='r',\n",
+    "                                       facecolor='g', fill=True))\n",
+    "\n",
+    "        if iq*4+im == modules[0]:\n",
+    "            for a_posx in range(2):\n",
+    "                for a_posy in range(8):\n",
+    "                    a_pos = np.array([a_posx*11., a_posy*20/8.])\n",
+    "                    pos = q_pos+m_pos+a_pos\n",
+    "\n",
+    "                    ax.add_patch(patches.Rectangle(q_pos+m_pos+a_pos, 11, 20/8., \n",
+    "                                                   linewidth=1, edgecolor='black',\n",
+    "                                                   facecolor='r', fill=True))\n",
+    "\n",
+    "                    if a_posx == 0:\n",
+    "                        label = str(a_posy+9)\n",
+    "                    else:\n",
+    "                        label = str(-a_posy+(a_posx*8))\n",
+    "\n",
+    "                    ax.text(pos[0]+4, pos[1]+0.3, label, fontsize=14)\n",
+    "        else:\n",
+    "            pos = q_pos+m_pos+np.array([5, 8])\n",
+    "            ax.text(pos[0], pos[1], 'Q{}M{}'.format(\n",
+    "                iq+1, im+1),  fontsize=22, color='y')\n",
+    "\n",
+    "ax.add_patch(patches.Rectangle([65, 93], 30, 4, linewidth=1, edgecolor='black',\n",
+    "                               facecolor='r', fill=True))\n",
+    "\n",
+    "ax.text(52, 94, 'ASICs:',  fontsize=22, color='black')\n",
+    "\n",
+    "for i_pos in range(8):\n",
+    "    pos = np.array([65, 93]) + np.array([i_pos*30/8.+0.3, 0.3])\n",
+    "\n",
+    "    ax.add_patch(patches.Rectangle(pos, 24/8., 3.4, linewidth=1, edgecolor='black',\n",
+    "                                   facecolor='deepskyblue', fill=True))\n",
+    "\n",
+    "    ax.text(pos[0]+0.5, pos[1]+0.5, '{}'.format(i_pos + 1),\n",
+    "            fontsize=18, color='black')"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### High Gain ###"
+    "## Raw pedestal distribution ##\n",
+    "\n",
+    "Distribution of a pedestal (ADUs) over trains for the pixel (12,12), memory cell 12. A median of the distribution is shown in yellow. A standard deviation is shown in red. The green line shows average over all pixels for a given memory cell and gain stage."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "scrolled": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
+    "fig, grid = plt.subplots(3, 1, sharex=\"col\", sharey=\"row\", figsize=(10, 7))\n",
+    "fig.subplots_adjust(wspace=0, hspace=0)\n",
+    "\n",
     "for cap in capacitor_settings:\n",
-    "    cell = 12\n",
-    "    gain = 0\n",
-    "    show_overview(res[cap], cell, gain, out_folder=out_folder, infix=\"_\".join(gain_runs.values()))"
+    "    for i in modules:\n",
+    "        qm = \"Q{}M{}\".format(i//4+1, i % 4+1)\n",
+    "        if data_g[cap][qm].shape[0] == 0:\n",
+    "            break\n",
+    "        for gain in range(3):\n",
+    "            data = data_g[cap][qm][:, gain]\n",
+    "            offset = np.nanmedian(data)\n",
+    "            noise = np.nanstd(data)\n",
+    "            xrange = [np.nanmin(data_g[cap][qm]), np.nanmax(data_g[cap][qm])]\n",
+    "            nbins = int(xrange[1] - xrange[0])\n",
+    "\n",
+    "            hn, cn = np.histogram(data, bins=nbins, range=xrange)\n",
+    "\n",
+    "            grid[gain].hist(data, range=xrange, bins=nbins)\n",
+    "            grid[gain].plot([offset-noise, offset-noise], [0, np.nanmax(hn)], \n",
+    "                            linewidth=1.5, color='red',\n",
+    "                            label='1 $\\sigma$ deviation')\n",
+    "            grid[gain].plot([offset+noise, offset+noise],\n",
+    "                            [0, np.nanmax(hn)], linewidth=1.5, color='red')\n",
+    "            grid[gain].plot([offset, offset], [0, 0],\n",
+    "                            linewidth=1.5, color='y', label='median')\n",
+    "\n",
+    "            grid[gain].plot([np.nanmedian(offset_g[cap][qm][:, :, 12, gain]), \n",
+    "                             np.nanmedian(offset_g[cap][qm][:, :, 12, gain])],\n",
+    "                            [0, np.nanmax(hn)], linewidth=1.5, color='green', \n",
+    "                            label='average over pixels')\n",
+    "\n",
+    "            grid[gain].set_xlim(xrange)\n",
+    "            grid[gain].set_ylim(0, np.nanmax(hn)*1.1)\n",
+    "            grid[gain].set_xlabel(\"Offset value [ADU]\")\n",
+    "            grid[gain].set_ylabel(\"# of occurance\")\n",
+    "\n",
+    "            if gain == 0:\n",
+    "                leg = grid[gain].legend(\n",
+    "                    loc='outside-top', ncol=3, \n",
+    "                    bbox_to_anchor=(0.1, 0.25, 0.7, 1.0))\n",
+    "\n",
+    "            grid[gain].text(820, np.nanmax(hn)*0.4,\n",
+    "                            \"{} gain\".format(gain_names[gain]), fontsize=20)\n",
+    "\n",
+    "            a = plt.axes([.125, .1, 0.775, .8], frame_on=False)\n",
+    "            a.patch.set_alpha(0.05)\n",
+    "            a.set_xlim(xrange)\n",
+    "            plt.plot([offset, offset], [0, 1], linewidth=1.5, color='y')\n",
+    "            plt.xticks([])\n",
+    "            plt.yticks([])\n",
+    "\n",
+    "        ypos = 0.9\n",
+    "        x1pos = (np.nanmedian(data_g[cap][qm][:, 0]) +\n",
+    "                 np.nanmedian(data_g[cap][qm][:, 2]))/2.\n",
+    "        x2pos = (np.nanmedian(data_g[cap][qm][:, 2]) +\n",
+    "                 np.nanmedian(data_g[cap][qm][:, 1]))/2.-10\n",
+    "\n",
+    "        plt.annotate(\"\", xy=(np.nanmedian(data_g[cap][qm][:, 0]), ypos), xycoords='data',\n",
+    "                     xytext=(np.nanmedian(data_g[cap][qm][:, 2]), ypos), textcoords='data',\n",
+    "                     arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n",
+    "\n",
+    "        plt.annotate('{}'.format(np.nanmedian(data_g[cap][qm][:, 0])-np.nanmedian(data_g[cap][qm][:, 2])),\n",
+    "                     xy=(x1pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n",
+    "\n",
+    "        plt.annotate(\"\", xy=(np.nanmedian(data_g[cap][qm][:, 2]), ypos), xycoords='data',\n",
+    "                     xytext=(np.nanmedian(data_g[cap][qm][:, 1]), ypos), textcoords='data',\n",
+    "                     arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n",
+    "\n",
+    "        plt.annotate('{}'.format(np.nanmedian(data_g[cap][qm][:, 2])-np.nanmedian(data_g[cap][qm][:, 1])),\n",
+    "                     xy=(x2pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n",
+    "\n",
+    "plt.show()"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Medium Gain ###"
+    "## Normality test ##\n",
+    "\n",
+    "Distributions of raw pedestal values have been tested if they are normally distributed. A normality test have been performed for each pixel and each memory cell. Plots below show histogram of p-Values and a 2D distribution for the  memory cell 12."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "scrolled": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
+    "# Loop over capacitor settings, modules, constants\n",
     "for cap in capacitor_settings:\n",
-    "    cell = 12\n",
-    "    gain = 1\n",
-    "    show_overview(res[cap], cell, gain, out_folder=out_folder, infix=\"_\".join(gain_runs.values()))"
+    "    for i in modules:\n",
+    "        qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "\n",
+    "        data = np.copy(ntest_g[cap][qm][:,:,:,:])\n",
+    "        data[badpix_g[cap][qm][:,:,:,:]>0] = 1.01\n",
+    "            \n",
+    "        hn,cn = np.histogram(data[:,:,:,0], bins=100)\n",
+    "       \n",
+    "        d = [{'x': np.arange(100)*0.01+0.01,\n",
+    "              'y': np.histogram(data[:,:,:,0], bins=100)[0],\n",
+    "              'drawstyle': 'steps-pre',\n",
+    "              'label' : 'High gain',\n",
+    "              },\n",
+    "             {'x': np.arange(100)*0.01+0.01,\n",
+    "              'y': np.histogram(data[:,:,:,1], bins=100)[0],\n",
+    "              'drawstyle': 'steps-pre',\n",
+    "              'label' : 'Medium gain',\n",
+    "              },\n",
+    "             {'x': np.arange(100)*0.01+0.01,\n",
+    "              'y': np.histogram(data[:,:,:,2], bins=100)[0],\n",
+    "              'drawstyle': 'steps-pre',\n",
+    "              'label' : 'Low gain',\n",
+    "              },\n",
+    "            ]\n",
+    "            \n",
+    "\n",
+    "        fig = plt.figure(figsize=(15,15), tight_layout={'pad': 0.5, 'w_pad': 0.3})\n",
+    "\n",
+    "        for gain in range(3):\n",
+    "            ax = fig.add_subplot(221+gain)\n",
+    "            heatmapPlot(data[:,:,12,gain], add_panels=False, cmap='viridis', figsize=(10,10),\n",
+    "                y_label='Rows', x_label='Columns',\n",
+    "                lut_label='p-Value',\n",
+    "                use_axis=ax,\n",
+    "                title='p-Value for cell 12, {} gain'.format(gain_names[gain]) )\n",
+    "            \n",
+    "        ax = fig.add_subplot(224)\n",
+    "        _ = simplePlot(d, #aspect=1.6, \n",
+    "                              x_label = \"p-Value\".format(gain), \n",
+    "                              y_label=\"# of occurance\",\n",
+    "                              use_axis=ax,\n",
+    "                               y_log=False, legend='outside-top-ncol3-frame', legend_pad=0.05, legend_size='5%')\n",
+    "        ax.ticklabel_format(style='sci', axis='y', scilimits=(4,6))\n",
+    "        \n"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Low Gain ###"
+    "## Single-Cell Overviews ##\n",
+    "\n",
+    "Single cell overviews allow to identify potential effects on all memory cells, e.g. on a sensor level. Additionally, they should serve as a first sanity check on expected behaviour, e.g. if structuring on the ASIC level is visible in the offsets, but otherwise no immediate artifacts are visible."
    ]
   },
   {
@@ -458,26 +728,101 @@
    },
    "outputs": [],
    "source": [
+    "cell = 12\n",
     "for cap in capacitor_settings:\n",
-    "    cell = 12\n",
-    "    gain = 2\n",
-    "    show_overview(res[cap], cell, gain, out_folder=out_folder, infix=\"_\".join(gain_runs.values()))"
+    "    for gain in range(3):\n",
+    "        display(\n",
+    "            Markdown('### Cell-12 overview - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "        fig = plt.figure(figsize=(15, 12) , tight_layout={'pad': 0.1, 'w_pad': 0.1})\n",
+    "        for qm in res[cap]:\n",
+    "            for iconst, const in enumerate(['Offset', 'Noise', 'BadPixelsDark']):\n",
+    "\n",
+    "                ax = fig.add_subplot(221+iconst)\n",
+    "\n",
+    "                data = res[cap][qm][const][:, :, 12, gain]\n",
+    "                vmax = 1.5 * np.nanmedian(res[cap][qm][const][:, :, 12, gain])\n",
+    "                title = const\n",
+    "                label = '{} value [ADU]'.format(const)\n",
+    "                title = '{} value'.format(const)\n",
+    "                if const == 'BadPixelsDark':\n",
+    "                    vmax = 4\n",
+    "                    data[data == 0] = np.nan\n",
+    "                    title = 'Bad pixel code'\n",
+    "                    label = title\n",
+    "\n",
+    "                    cb_labels = ['1 {}'.format(BadPixels.NOISE_OUT_OF_THRESHOLD.name),\n",
+    "                                 '2 {}'.format(BadPixels.OFFSET_NOISE_EVAL_ERROR.name),\n",
+    "                                 '3 {}'.format(BadPixels.OFFSET_OUT_OF_THRESHOLD.name),\n",
+    "                                 '4 {}'.format('MIXED')]\n",
+    "\n",
+    "                    heatmapPlot(data, add_panels=False, cmap='viridis',\n",
+    "                                y_label='Rows', x_label='Columns',\n",
+    "                                lut_label='', vmax=vmax,\n",
+    "                                use_axis=ax, cb_ticklabels=cb_labels, cb_ticks = np.arange(4)+1,\n",
+    "                                title='{}'.format(title))\n",
+    "\n",
+    "                else:\n",
+    "\n",
+    "                    heatmapPlot(data, add_panels=False, cmap='viridis',\n",
+    "                                y_label='Rows', x_label='Columns',\n",
+    "                                lut_label=label, vmax=vmax,\n",
+    "                                use_axis=ax,\n",
+    "                                title='{}'.format(title))\n",
+    "\n",
+    "        #show_overview(res[cap], cell, gain, out_folder=out_folder, infix=\"_\".join(gain_runs.values()))\n",
+    "\n",
+    "        fig = plt.figure(figsize=(10, 5))\n",
+    "        for qm in res[cap]:\n",
+    "            for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "                data = res[cap][qm][const]\n",
+    "                dataBP = np.copy(data)\n",
+    "                dataBP[res[cap][qm]['BadPixelsDark'] > 0] = -1\n",
+    "\n",
+    "                x_ranges = [[0, 1500], [0, 40]]\n",
+    "                hn, cn = np.histogram(\n",
+    "                    data[:, :, :, gain], bins=100, range=x_ranges[iconst])\n",
+    "                hnBP, cnBP = np.histogram(dataBP[:, :, :, gain], bins=cn)\n",
+    "\n",
+    "                d = [{'x': cn[:-1],\n",
+    "                      'y': hn,\n",
+    "                      'drawstyle': 'steps-pre',\n",
+    "                      'label': 'All data',\n",
+    "                      },\n",
+    "                     {'x': cnBP[:-1],\n",
+    "                      'y': hnBP,\n",
+    "                      'drawstyle': 'steps-pre',\n",
+    "                      'label': 'Bad pixels masked',\n",
+    "                      },\n",
+    "                     ]\n",
+    "\n",
+    "                ax = fig.add_subplot(121+iconst)\n",
+    "                _ = simplePlot(d, figsize=(5, 7), aspect=1,\n",
+    "                                    x_label=\"{} value [ADU]\".format(const),\n",
+    "                                    y_label=\"# of occurance\",\n",
+    "                                    title='', legend_pad=0.1, legend_size='10%',\n",
+    "                                    use_axis=ax,\n",
+    "                                    y_log=True, legend='outside-top-2col-frame')\n",
+    "\n",
+    "        plt.show()"
    ]
   },
   {
-   "cell_type": "markdown",
+   "cell_type": "raw",
    "metadata": {},
    "source": [
-    "## Global Bad Pixel Behaviour ##\n",
+    ".. raw:: latex\n",
     "\n",
-    "The following plots show the results of bad pixel evaluation for all evaluated memory cells. Cells are stacked in the Z-dimension, while pixels values in x/y are rebinned with a factor of 2. This excludes single bad pixels present only in disconnected pixels. Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated. Colors encode the bad pixel type, or mixed type."
+    "    \\newpage"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### High Gain ###"
+    "## Global Bad Pixel Behaviour ##\n",
+    "\n",
+    "The following plots shows the results of a bad pixel evaluation for all evaluated memory cells. Cells are stacked in the Z-dimension, while pixels values in x/y are re-binned with a factor of 2. This excludes single bad pixels present only in disconnected pixels. Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated. Colors encode the bad pixel type, or mixed type."
    ]
   },
   {
@@ -495,17 +840,33 @@
     "\n",
     "rebin = 8 if not high_res_badpix_3d else 2\n",
     "\n",
-    "gain = 0\n",
-    "for cap in capacitor_settings:\n",
-    "    for mod, data in badpix_g[cap].items():\n",
-    "        plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)"
+    "for gain in range(3):\n",
+    "    display(Markdown('### Bad pixel behaviour - {} gain ###'.format(gain_names[gain])))\n",
+    "    for cap in capacitor_settings:\n",
+    "        for mod, data in badpix_g[cap].items():\n",
+    "            plot_badpix_3d(data[...,gain], cols, title='', rebin_fac=rebin)\n",
+    "            ax = plt.gca()\n",
+    "            leg = ax.get_legend()\n",
+    "            leg.set(alpha=0.5)\n",
+    "    plt.show()"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Medium Gain ###"
+    "## Summary across tiles ##\n",
+    "\n",
+    "Plots give an overview of calibration constants averaged across tiles. A bad pixel mask is applied. Constants are compared with pre-existing constants retrieved from the calibration database. Differences $\\Delta$ between the old and new constants is shown."
    ]
   },
   {
@@ -516,17 +877,107 @@
    },
    "outputs": [],
    "source": [
-    "gain = 1\n",
-    "for cap in capacitor_settings:\n",
-    "    for mod, data in badpix_g[cap].items():\n",
-    "        plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)"
+    "display(Markdown('The following pre-existing constants are used for comparison: \\n'))\n",
+    "for key in old_mdata:\n",
+    "    display(Markdown('**{}** at {}'.format(key, old_mdata[key])))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Loop over capacitor settings, modules, constants\n",
+    "for cap in res:\n",
+    "    for qm in res[cap]:\n",
+    "        for gain in range(3):\n",
+    "            display(Markdown('### Summary across tiles - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "            for const in res[cap][qm]:\n",
+    "                data = np.copy(res[cap][qm][const][:, :, :, gain])\n",
+    "\n",
+    "                label = 'Fraction of bad pixels'\n",
+    "\n",
+    "                if const != 'BadPixelsDark':\n",
+    "                    data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n",
+    "                    label = '{} value [ADU]'.format(const)\n",
+    "                else:\n",
+    "                    data[data>0] = 1.0\n",
+    "                    \n",
+    "                data = data.reshape(\n",
+    "                    int(data.shape[0] / 32),\n",
+    "                    32,\n",
+    "                    int(data.shape[1] / 128),\n",
+    "                    128,\n",
+    "                    data.shape[2])\n",
+    "                data = np.nanmean(data, axis=(1, 3)).swapaxes(\n",
+    "                    0, 2).reshape(512, 16)\n",
+    "\n",
+    "                fig = plt.figure(figsize=(15, 6))\n",
+    "                ax = fig.add_subplot(121)\n",
+    "\n",
+    "                _ = heatmapPlot(data[:510, :], add_panels=True,\n",
+    "                                y_label='Momery Cell ID', x_label='Tile ID',\n",
+    "                                lut_label=label, use_axis=ax,\n",
+    "                                panel_y_label=label, panel_x_label=label,\n",
+    "                                cmap='viridis',  # cb_loc='right',cb_aspect=15,\n",
+    "                                x_ticklabels=np.arange(16)+1,\n",
+    "                                x_ticks=np.arange(16)+0.5)\n",
+    "\n",
+    "                if old_const[const] is not None:\n",
+    "                    ax = fig.add_subplot(122)\n",
+    "\n",
+    "                    dataold = np.copy(old_const[const][:, :, :, gain])\n",
+    "                    \n",
+    "                    label = '$\\Delta$ {}'.format(label)\n",
+    "\n",
+    "                    if const != 'BadPixelsDark':\n",
+    "                        if old_const['BadPixelsDark'] is not None:\n",
+    "                            dataold[old_const['BadPixelsDark'][:, :, :, gain] > 0] = np.nan\n",
+    "                        else:\n",
+    "                            dataold[:] = np.nan\n",
+    "                    else:\n",
+    "                        dataold[dataold>0]=1.0\n",
+    "\n",
+    "                    dataold = dataold.reshape(\n",
+    "                        int(dataold.shape[0] / 32),\n",
+    "                        32,\n",
+    "                        int(dataold.shape[1] / 128),\n",
+    "                        128,\n",
+    "                        dataold.shape[2])\n",
+    "                    dataold = np.nanmean(dataold, axis=(\n",
+    "                        1, 3)).swapaxes(0, 2).reshape(512, 16)\n",
+    "                    dataold = dataold - data\n",
+    "\n",
+    "                    _ = heatmapPlot(dataold[:510, :], add_panels=True,\n",
+    "                                    y_label='Momery Cell ID', x_label='Tile ID',\n",
+    "                                    lut_label=label, use_axis=ax,\n",
+    "                                    panel_y_label=label, panel_x_label=label,\n",
+    "                                    cmap='viridis',  # cb_loc='right',cb_aspect=15,\n",
+    "                                    x_ticklabels=np.arange(16)+1,\n",
+    "                                    x_ticks=np.arange(16)+0.5)\n",
+    "            plt.show()\n"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Low Gain ###"
+    "## Variation of offset and noise across Tiles and ASICs ##\n",
+    "\n",
+    "The following plots show a standard deviation $\\sigma$ of the calibration constant. The plot of standard deviations across tiles show pixels of one tile ($128 \\times 32$). Value for each pixel shows a standard deviation across 16 tiles. The standard deviation across ASICs are shown overall tiles. The plot shows pixels of one ASIC ($16 \\times 32$), where the value shows a standard deviation across all ACIS of the module."
    ]
   },
   {
@@ -537,19 +988,87 @@
    },
    "outputs": [],
    "source": [
-    "gain = 2\n",
-    "for cap in capacitor_settings:\n",
-    "    for mod, data in badpix_g[cap].items():\n",
-    "        plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)"
+    "# Loop over capacitor settings, modules, constants\n",
+    "for cap in res:\n",
+    "    for qm in res[cap]:\n",
+    "        for gain in range(3):\n",
+    "            display(Markdown('### Variation of offset and noise across ASICs - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "            fig = plt.figure(figsize=(15, 6))\n",
+    "            for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "                data = np.copy(res[cap][qm][const][:, :, :, gain])\n",
+    "                data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n",
+    "                label = '$\\sigma$ {} [ADU]'.format(const)\n",
+    "\n",
+    "                dataA = np.nanmean(data, axis=2)  # average over cells\n",
+    "                dataA = dataA.reshape(8, 32, 16, 16)\n",
+    "                dataA = np.nanstd(dataA, axis=(0, 2))  # average across ASICs\n",
+    "\n",
+    "                ax = fig.add_subplot(121+iconst)\n",
+    "                _ = heatmapPlot(dataA, add_panels=True,\n",
+    "                                y_label='rows', x_label='columns',\n",
+    "                                lut_label=label, use_axis=ax,\n",
+    "                                panel_y_label=label, panel_x_label=label,\n",
+    "                                cmap='viridis'\n",
+    "                                )\n",
+    "\n",
+    "            plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Loop over capacitor settings, modules, constants\n",
+    "for cap in res:\n",
+    "    for qm in res[cap]:\n",
+    "        for gain in range(3):\n",
+    "            display(Markdown('### Variation of offset and noise across tiles - {} gain'.format(gain_names[gain])))\n",
+    "\n",
+    "            fig = plt.figure(figsize=(15, 6))\n",
+    "            for iconst, const in enumerate(['Offset', 'Noise']):\n",
+    "                data = np.copy(res[cap][qm][const][:, :, :, gain])\n",
+    "                data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n",
+    "                label = '$\\sigma$ {} [ADU]'.format(const)\n",
+    "                    \n",
+    "                dataT = data.reshape(\n",
+    "                    int(data.shape[0] / 32),\n",
+    "                    32,\n",
+    "                    int(data.shape[1] / 128),\n",
+    "                    128,\n",
+    "                    data.shape[2])\n",
+    "                dataT = np.nanstd(dataT, axis=(0, 2))\n",
+    "                dataT = np.nanmean(dataT, axis=2)\n",
+    "                \n",
+    "                ax = fig.add_subplot(121+iconst)\n",
+    "                _ = heatmapPlot(dataT, add_panels=True,\n",
+    "                                y_label='rows', x_label='columns',\n",
+    "                                lut_label=label, use_axis=ax,\n",
+    "                                panel_y_label=label, panel_x_label=label,\n",
+    "                                cmap='viridis')\n",
+    "            plt.show()"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Aggregate values, and per Cell behaviour ##\n",
+    "## Aggregate values and per cell behaviour ##\n",
     "\n",
-    "The following tables and plots give an overview of statistical aggregates for each constant, as well as per cell behavior."
+    "The following tables and plots give an overview of statistical aggregates for each constant, as well as per-cell behavior, averaged across pixels."
    ]
   },
   {
@@ -560,50 +1079,189 @@
    },
    "outputs": [],
    "source": [
-    "for cap in capacitor_settings:\n",
-    "    create_constant_overview(offset_g[cap], \"Offset (ADU)\", mem_cells, 500, 1500,\n",
-    "                             out_folder=out_folder, infix=\"_\".join(gain_runs.values()))"
+    "# Loop over capacitor settings, modules, constants\n",
+    "for cap in res:\n",
+    "    for qm in res[cap]:\n",
+    "        for gain in range(3):\n",
+    "            display(Markdown('### Mean over pixels - {} gain'.format(gain_names[gain])))\n",
+    "            \n",
+    "            fig = plt.figure(figsize=(9,11))\n",
+    "\n",
+    "            for iconst, const in enumerate(res[cap][qm]):\n",
+    "\n",
+    "                ax = fig.add_subplot(311+iconst)\n",
+    "                    \n",
+    "                data = res[cap][qm][const][:,:,:510,gain]\n",
+    "                if const == 'BadPixelsDark':\n",
+    "                    data[data>0] = 1.0\n",
+    "                    \n",
+    "                dataBP = np.copy(data)\n",
+    "                dataBP[badpix_g[cap][qm][:,:,:510,gain]>0] = -10\n",
+    "\n",
+    "                data = np.nanmean(data, axis=(0,1))\n",
+    "                dataBP = np.nanmean(dataBP, axis=(0,1))\n",
+    "                \n",
+    "                d = [{'y': data,\n",
+    "                      'x': np.arange(data.shape[0]),\n",
+    "                      'drawstyle': 'steps-mid',\n",
+    "                      'label' : 'All data'\n",
+    "                     }\n",
+    "                    ]\n",
+    "                \n",
+    "                if const != 'BadPixelsDark':\n",
+    "                    d.append({'y': dataBP,\n",
+    "                      'x': np.arange(data.shape[0]),\n",
+    "                      'drawstyle': 'steps-mid',\n",
+    "                      'label' : 'good pixels only'\n",
+    "                     })\n",
+    "                    y_title = \"{} value [ADU]\".format(const)\n",
+    "                    title = \"{} value, {} gain\".format(const, gain_names[gain])\n",
+    "                else:\n",
+    "                    y_title = \"Fraction of Bad Pixels\"\n",
+    "                    title = \"Fraction of Bad Pixels, {} gain\".format(gain_names[gain])\n",
+    "                \n",
+    "                data_min = np.min([data, dataBP])if const != 'BadPixelsDark' else np.min([data])\n",
+    "                data_max = np.max([data[20:], dataBP[20:]])\n",
+    "                data_dif = data_max - data_min\n",
+    "                \n",
+    "                local_max = np.max([data[200:300], dataBP[200:300]])\n",
+    "                frac = 0.35\n",
+    "                new_max = (local_max - data_min*(1-frac))/frac\n",
+    "                new_max = np.max([data_max, new_max])\n",
+    "               \n",
+    "                _ = simplePlot(d, figsize=(10,10), aspect=2, xrange=(-12, 510),\n",
+    "                                  x_label = 'Memory Cell ID', \n",
+    "                                  y_label=y_title, use_axis=ax,\n",
+    "                                  title=title,\n",
+    "                                  title_position=[0.5, 1.15],  \n",
+    "                                  inset='xy-coord-right', inset_x_range=(0,20), inset_indicated=True,\n",
+    "                                  inset_labeled=True, inset_coord=[0.2,0.5,0.6,0.95],\n",
+    "                                    inset_lw = 1.0, y_range = [data_min-data_dif*0.05, new_max+data_dif*0.05],\n",
+    "                                  y_log=False, legend='outside-top-ncol2-frame', legend_size='18%',\n",
+    "                                     legend_pad=0.00)\n",
+    "                \n",
+    "                plt.tight_layout(pad=1.08, h_pad=0.35)\n",
+    "                \n",
+    "            plt.show()"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false,
-    "scrolled": false
-   },
-   "outputs": [],
+   "cell_type": "raw",
+   "metadata": {},
    "source": [
-    "for cap in capacitor_settings:\n",
-    "    create_constant_overview(noise_g[cap], \"Noise (ADU)\", mem_cells, 0, 25,\n",
-    "                             out_folder=out_folder, infix=\"_\".join(gain_runs.values()))"
+    ".. raw:: latex\n",
+    "\n",
+    "    \\newpage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary tables ##\n",
+    "\n",
+    "The following tables show summary information for the evaluated module. Values for currently evaluated constants are compared with values for pre-existing constants retrieved from the calibration database."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "scrolled": false
+    "collapsed": false
    },
    "outputs": [],
    "source": [
-    "for cap in capacitor_settings:\n",
-    "    bad_pixel_aggregate_g = OrderedDict()\n",
-    "    for m, d in badpix_g[cap].items():\n",
-    "        bad_pixel_aggregate_g[m] = d.astype(np.bool).astype(np.float)\n",
-    "    create_constant_overview(bad_pixel_aggregate_g, \"Bad pixel fraction\", mem_cells, 0, 0.10, 3,\n",
-    "                             out_folder=out_folder, infix=\"_\".join(gain_runs.values()))"
+    "table = []\n",
+    "\n",
+    "for cap in res:\n",
+    "    for qm in res[cap]:\n",
+    "        for gain in range(3):\n",
+    "            \n",
+    "            l_data = []\n",
+    "            l_data_old = []\n",
+    "            \n",
+    "            data = np.copy(res[cap][qm]['BadPixelsDark'][:,:,:,gain])\n",
+    "            datau32 = data.astype(np.uint32)\n",
+    "            l_data.append(data)\n",
+    "            l_data.append(datau32 - np.bitwise_or(datau32,BadPixels.NOISE_OUT_OF_THRESHOLD.value))\n",
+    "            l_data.append(datau32 - np.bitwise_or(datau32,BadPixels.OFFSET_OUT_OF_THRESHOLD.value))\n",
+    "            l_data.append(datau32 - np.bitwise_or(datau32,BadPixels.OFFSET_NOISE_EVAL_ERROR.value))\n",
+    "            \n",
+    "            if old_const['BadPixelsDark'] is not None:\n",
+    "                dataold = np.copy(old_const['BadPixelsDark'][:, :, :, gain])\n",
+    "                datau32old = dataold.astype(np.uint32)\n",
+    "                l_data_old.append(dataold)\n",
+    "                l_data_old.append(datau32old - np.bitwise_or(datau32old,BadPixels.NOISE_OUT_OF_THRESHOLD.value))\n",
+    "                l_data_old.append(datau32old - np.bitwise_or(datau32old,BadPixels.OFFSET_OUT_OF_THRESHOLD.value))\n",
+    "                l_data_old.append(datau32old - np.bitwise_or(datau32old,BadPixels.OFFSET_NOISE_EVAL_ERROR.value))\n",
+    "\n",
+    "            l_data_name = ['All bad pixels', 'NOISE_OUT_OF_THRESHOLD', \n",
+    "                           'OFFSET_OUT_OF_THRESHOLD', 'OFFSET_NOISE_EVAL_ERROR']\n",
+    "            \n",
+    "            l_threshold = ['', '{}'.format(thresholds_noise_sigma), '{}'.format(thresholds_offset_sigma),\n",
+    "                          '{}/{}'.format(thresholds_offset_hard, thresholds_noise_hard)]\n",
+    "            \n",
+    "            for i in range(len(l_data)):\n",
+    "                line = ['{}, gain {}'.format(l_data_name[i], gain_names[gain]),\n",
+    "                          l_threshold[i],\n",
+    "                          len(l_data[i][l_data[i]>0].flatten())\n",
+    "                       ]\n",
+    "            \n",
+    "                if old_const['BadPixelsDark'] is not None:\n",
+    "                    line += [len(l_data_old[i][l_data_old[i]>0].flatten())]\n",
+    "                else:\n",
+    "                    line += ['-']\n",
+    "                    \n",
+    "            table.append(line)\n",
+    "\n",
+    "display(Markdown('### Number of bad pixels ###'.format(qm)))\n",
+    "md = display(Latex(tabulate.tabulate(table, tablefmt='latex', \n",
+    "                                     headers=[\"Pixel type\", \"Threshold\", \"New constant\", \"Old constant \"])))  "
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
-   "source": []
+   "source": [
+    "\n",
+    "header = ['Parameter', \n",
+    "          \"New constant\", \"Old constant \", \n",
+    "          \"New constant\", \"Old constant \", \n",
+    "          \"New constant\", \"Old constant \"]\n",
+    "\n",
+    "for const in ['Offset', 'Noise']:\n",
+    "    table = [['','High gain', 'High gain', 'Medium gain', 'Medium gain', 'Low gain', 'Low gain']]\n",
+    "    for cap in res:\n",
+    "        for qm in res[cap]:\n",
+    "\n",
+    "            data = np.copy(res[cap][qm][const])\n",
+    "            data[res[cap][qm]['BadPixelsDark']>0] = np.nan\n",
+    "            \n",
+    "            if old_const[const] is not None and old_const['BadPixelsDark'] is not None :\n",
+    "                dataold = np.copy(old_const[const])\n",
+    "                dataold[old_const['BadPixelsDark']>0] = np.nan\n",
+    "\n",
+    "            f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]\n",
+    "            n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']\n",
+    "\n",
+    "            for i, f in enumerate(f_list):\n",
+    "                line = [n_list[i]]\n",
+    "                for gain in range(3):\n",
+    "                    line.append('{:6.1f}'.format(f(data[...,gain])))\n",
+    "                    if old_const[const] is not None and old_const['BadPixelsDark'] is not None:\n",
+    "                        line.append('{:6.1f}'.format(f(dataold[...,gain])))\n",
+    "                    else:\n",
+    "                        line.append('-')\n",
+    "\n",
+    "                table.append(line)\n",
+    "\n",
+    "    display(Markdown('### {} [ADU], good pixels only ###'.format(const)))\n",
+    "    md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))  "
+   ]
   }
  ],
  "metadata": {
@@ -622,7 +1280,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.6"
+   "version": "3.4.3"
   }
  },
  "nbformat": 4,
diff --git a/notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb b/notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..136a8e3241cf4774994205867e593c8ff90104a6
--- /dev/null
+++ b/notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb
@@ -0,0 +1,381 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Summary of LPD dark characterization #\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "cluster_profile = \"noDB\" # The ipcluster profile to use\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/karnem/LPD/\" # path to output to, required"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from collections import OrderedDict\n",
+    "import copy\n",
+    "from datetime import datetime\n",
+    "import os\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')\n",
+    "\n",
+    "import h5py\n",
+    "from IPython.display import display, Markdown, Latex\n",
+    "import numpy as np\n",
+    "import matplotlib\n",
+    "matplotlib.use(\"agg\")\n",
+    "import matplotlib.patches as patches\n",
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline\n",
+    "import tabulate\n",
+    "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
+    "from XFELDetAna.plotting.simpleplot import simplePlot\n",
+    "\n",
+    "gain_names = ['High', 'Medium', 'Low']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# Load constants from local files\n",
+    "files = glob.glob('{}/*h5'.format(out_folder))\n",
+    "\n",
+    "data = OrderedDict()\n",
+    "mod_names = []\n",
+    "# Loop over files\n",
+    "for filename in files:\n",
+    "    with h5py.File(filename, 'r') as f:\n",
+    "        # Loop over modules\n",
+    "        for mKey in f.keys():\n",
+    "            if mKey not in data:\n",
+    "                mod_names.append(mKey)\n",
+    "                data[mKey] = OrderedDict()\n",
+    "            # Loop over constants\n",
+    "            for cKey in f.get(mKey):\n",
+    "                if cKey not in data[mKey]:\n",
+    "                    #print(\"/\".join((mKey, cKey, '0', 'data')))\n",
+    "                    data[mKey][cKey] = f.get(\n",
+    "                        \"/\".join((mKey, cKey, '0', 'data'))).value\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "mod_idx = np.argsort(mod_names)\n",
+    "\n",
+    "constants = {'Offset': np.zeros((len(mod_names), 256, 256, 512, 3)),\n",
+    "             'Noise': np.zeros((len(mod_names), 256, 256, 512, 3)),\n",
+    "             'BadPixelsDark': np.zeros((len(mod_names), 256, 256, 512, 3))}\n",
+    "\n",
+    "for i, idx in enumerate(mod_idx):\n",
+    "    for key, item in constants.items():\n",
+    "        item[i] = data[mod_names[idx]][key]\n",
+    "\n",
+    "mod_names = np.array(mod_names)[mod_idx]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "display(Markdown('## Processed modules ##'))\n",
+    "\n",
+    "fig, ax = plt.subplots(1, figsize=(10, 10))\n",
+    "ax.set_axis_off()\n",
+    "\n",
+    "ax.set_xlim(0, 97)\n",
+    "ax.set_ylim(0, 97)\n",
+    "\n",
+    "q_poses = np.array([[51, 47], [47, 1], [1, 5], [5, 51]])\n",
+    "m_poses = np.array([[22.5, 20.5], [22.5, 0.5], [0.5, 0.5], [0.5, 20.5]])\n",
+    "\n",
+    "counter = 0\n",
+    "for iq, q_pos in enumerate(q_poses):\n",
+    "    ax.add_patch(patches.Rectangle(q_pos, 45, 45, linewidth=2, edgecolor='dodgerblue',\n",
+    "                                   facecolor='y', fill=True))\n",
+    "\n",
+    "    ax.text(q_pos[0]+20, q_pos[1]+41.5, 'Q{}'.format(iq+1), fontsize=22)\n",
+    "    for im, m_pos in enumerate(m_poses):\n",
+    "        color = 'gray'\n",
+    "        if 'Q{}M{}'.format(iq+1, im+1) in mod_names:\n",
+    "            color = 'green'\n",
+    "            if np.nanmean(constants['Noise'][counter, :, :, :, 0]) == 0:\n",
+    "                color = 'red'\n",
+    "            counter += 1\n",
+    "        ax.add_patch(patches.Rectangle(q_pos+m_pos, 22, 20, linewidth=3, edgecolor='dodgerblue',\n",
+    "                                       facecolor=color, fill=True))\n",
+    "\n",
+    "        pos = q_pos+m_pos+np.array([5, 8])\n",
+    "        ax.text(pos[0], pos[1], 'Q{}M{}'.format(\n",
+    "            iq+1, im+1),  fontsize=24, color='yellow')\n",
+    "\n",
+    "\n",
+    "_ = ax.legend(handles=[patches.Patch(facecolor='red', label='No data'),\n",
+    "                       patches.Patch(facecolor='gray', label='Not processed'),\n",
+    "                       patches.Patch(facecolor='green', label='Processed')],\n",
+    "              loc='outside-top', ncol=3, bbox_to_anchor=(0.1, 0.25, 0.7, 0.8))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary figures across Modules ##\n",
+    "\n",
+    "Plots give an overview of calibration constants averaged across pixels. A bad pixel mask is applied. Constants are averaged across pixels."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "q_pad = 15\n",
+    "m_pad = 5\n",
+    "\n",
+    "m_size = 256\n",
+    "q_size = m_size*2+m_pad*2\n",
+    "\n",
+    "image = np.zeros((m_size*4+q_pad+m_pad*3, m_size*4+q_pad+m_pad*3))\n",
+    "\n",
+    "q_poses = [[q_size+q_pad, q_size],\n",
+    "           [q_size, 0],\n",
+    "           [0, q_pad],  [q_pad, q_size+q_pad]]\n",
+    "m_poses = [[m_size+m_pad, m_size+m_pad], [m_size+m_pad, 0], [0, 0],\n",
+    "           [0, m_size+m_pad]]\n",
+    "\n",
+    "# Loop over capacitor settings, modules, constants\n",
+    "for const_name, const in constants.items():\n",
+    "\n",
+    "    if const_name == 'BadPixelsDark':\n",
+    "        continue\n",
+    "\n",
+    "    display(Markdown('### {}'.format(const_name)))\n",
+    "    for gain in range(3):\n",
+    "\n",
+    "        image[:] = np.nan\n",
+    "        counter = 0\n",
+    "        for iq, q_pos in enumerate(q_poses):\n",
+    "            for im, m_pos in enumerate(m_poses):\n",
+    "                if 'Q{}M{}'.format(iq+1, im+1) in mod_names:\n",
+    "                    values = np.nanmean(const[counter, :, :, :, gain], axis=2)\n",
+    "                    values[values == 0] = np.nan\n",
+    "                    image[q_pos[1]+m_pos[1]: q_pos[1]+m_pos[1]+m_size,\n",
+    "                          q_pos[0]+m_pos[0]: q_pos[0]+m_pos[0] + m_size] = values\n",
+    "                    counter += 1\n",
+    "\n",
+    "        std = np.nanstd(image)\n",
+    "        mean = np.nanmedian(image)\n",
+    "        if const_name == 'Noise':\n",
+    "            std = mean/4.\n",
+    "        _ = heatmapPlot(image, add_panels=False, figsize=(20, 20),\n",
+    "                        vmin=mean-std*2, vmax=mean+std*2,\n",
+    "                        x_label='columns', y_label='rows',\n",
+    "                        cb_label='{}, mean over memory cells [ADU]'.format(\n",
+    "                            const_name),\n",
+    "                        cmap='viridis',\n",
+    "                        title='{}. {} gain'.format(const_name, gain_names[gain]))\n",
+    "        plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# Loop over capacitor settings, modules, constants\n",
+    "for const_name, const in constants.items():\n",
+    "\n",
+    "    display(Markdown('### Summary across Modules - {}'.format(const_name)))\n",
+    "    for gain in range(3):\n",
+    "        data = np.copy(const[:, :, :, :, gain])\n",
+    "\n",
+    "        if const_name != 'BadPixelsDark':\n",
+    "            label = '{} value [ADU], good pixels only'.format(const_name)\n",
+    "            data[constants['BadPixelsDark'][:, :, :, :, gain] > 0] = np.nan\n",
+    "            datamean = np.nanmean(data, axis=(1, 2))\n",
+    "\n",
+    "            fig = plt.figure(figsize=(15, 6), tight_layout={\n",
+    "                             'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})\n",
+    "            ax = fig.add_subplot(121)\n",
+    "        else:\n",
+    "            label = 'Fraction of bad pixels'\n",
+    "            data[data > 0] = 1.0\n",
+    "            datamean = np.nanmean(data, axis=(1, 2))\n",
+    "            datamean[datamean == 1.0] = np.nan\n",
+    "\n",
+    "            fig = plt.figure(figsize=(15, 6), tight_layout={\n",
+    "                             'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})\n",
+    "            ax = fig.add_subplot(111)\n",
+    "\n",
+    "        d = []\n",
+    "        for im, mod in enumerate(datamean):\n",
+    "            d.append({'x': np.arange(mod.shape[0]),\n",
+    "                      'y': mod,\n",
+    "                      'drawstyle': 'steps-pre',\n",
+    "                      'label': mod_names[im],\n",
+    "                      })\n",
+    "\n",
+    "        _ = simplePlot(d, figsize=(10, 10), xrange=(-12, 510),\n",
+    "                            x_label='Memory Cell ID',\n",
+    "                            y_label=label,\n",
+    "                            use_axis=ax,\n",
+    "                            title='{} gain'.format(gain_names[gain]),\n",
+    "                            title_position=[0.5, 1.18],\n",
+    "                            legend='outside-top-ncol6-frame', legend_size='18%',\n",
+    "                            legend_pad=0.00)\n",
+    "\n",
+    "        if const_name != 'BadPixelsDark':\n",
+    "            ax = fig.add_subplot(122)\n",
+    "            label = '$\\sigma$ {} [ADU], good pixels only'.format(const_name)\n",
+    "            d = []\n",
+    "            for im, mod in enumerate(np.nanstd(data, axis=(1, 2))):\n",
+    "                d.append({'x': np.arange(mod.shape[0]),\n",
+    "                          'y': mod,\n",
+    "                          'drawstyle': 'steps-pre',\n",
+    "                          'label': mod_names[im],\n",
+    "                          })\n",
+    "\n",
+    "            _ = simplePlot(d, figsize=(10, 10), xrange=(-12, 510),\n",
+    "                                x_label='Memory Cell ID',\n",
+    "                                y_label=label,\n",
+    "                                use_axis=ax,\n",
+    "                                title='{} gain'.format(gain_names[gain]),\n",
+    "                                title_position=[0.5, 1.18],\n",
+    "                                legend='outside-top-ncol6-frame', legend_size='18%',\n",
+    "                                legend_pad=0.00)\n",
+    "\n",
+    "        plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary tables across Modules ##\n",
+    "\n",
+    "Tables show values averaged across all pixels and memory cells of a given detector module."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "if u'$' in tabulate.LATEX_ESCAPE_RULES:\n",
+    "    del(tabulate.LATEX_ESCAPE_RULES[u'$'])\n",
+    "    \n",
+    "if u'\\\\' in tabulate.LATEX_ESCAPE_RULES:\n",
+    "    del(tabulate.LATEX_ESCAPE_RULES[u'\\\\'])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "header = ['Module', 'High gain', 'Medium gain', 'Low gain']\n",
+    "\n",
+    "for const_name, const in constants.items():\n",
+    "    table = []\n",
+    "\n",
+    "    for i_mod, mod in enumerate(mod_names):\n",
+    "\n",
+    "        t_line = [mod]\n",
+    "        for gain in range(3):\n",
+    "\n",
+    "            data = np.copy(const[i_mod, :, :, :, gain])\n",
+    "            if const_name == 'BadPixelsDark':\n",
+    "                data[data > 0] = 1.0\n",
+    "                datasum = np.nansum(data)\n",
+    "                datamean = np.nanmean(data)\n",
+    "                if datamean == 1.0:\n",
+    "                    datamean = np.nan\n",
+    "                    datasum = np.nan\n",
+    "\n",
+    "                t_line.append('{:6.0f} ({:6.3f}) '.format(\n",
+    "                    datasum, datamean))\n",
+    "\n",
+    "                label = '## Number (fraction) of bad pixels'\n",
+    "            else:\n",
+    "\n",
+    "                data[constants['BadPixelsDark']\n",
+    "                     [i_mod, :, :, :, gain] > 0] = np.nan\n",
+    "\n",
+    "                t_line.append('{:6.1f} $\\\\pm$ {:6.1f}'.format(\n",
+    "                    np.nanmean(data), np.nanstd(data)))\n",
+    "\n",
+    "                label = '## Average {} [ADU], good pixels only ##'.format(const_name)\n",
+    "\n",
+    "        table.append(t_line)\n",
+    "\n",
+    "    display(Markdown(label))\n",
+    "    md = display(Latex(tabulate.tabulate(\n",
+    "        table, tablefmt='latex', headers=header)))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.4.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/xfel_calibrate/calibrate.py b/xfel_calibrate/calibrate.py
index 858060827c1fd7d102b6dc386774b3778d622221..ab451ab231fe220b86014d7bfec5efa3728562f4 100755
--- a/xfel_calibrate/calibrate.py
+++ b/xfel_calibrate/calibrate.py
@@ -399,7 +399,8 @@ def set_figure_format(nb, enable_vector_format):
 
 def concurrent_run(temp_path, nb, nbname, args, cparm=None, cval=None,
                    final_job=False, job_list=[], fmtcmd="", cluster_cores=8,
-                   sequential=False, priority=2, dependent=False):
+                   sequential=False, priority=2, dependent=False,
+                   show_title=True):
     """ Launch a concurrent job on the cluster via SLURM
     """
 
@@ -415,6 +416,8 @@ def concurrent_run(temp_path, nb, nbname, args, cparm=None, cval=None,
     parms = extract_parameters(nb)
     params = parameter_values(parms, **args)
     new_nb = replace_definitions(nb, params, execute=False)
+    if not show_title:
+        first_markdown_cell(new_nb).source = ''
     set_figure_format(new_nb, args["vector_figs"])
     base_name = nbname.replace(".ipynb", "")
     new_name = "{}__{}__{}.ipynb".format(
@@ -663,15 +666,21 @@ def run():
         cmd = ('"from cal_tools.tools import finalize; ' +
                'finalize({{joblist}}, $1, \'{run_path}\', \'{out_path}\', ' +
                '\'{project}\', \'{calibration}\', \'{author}\', '
-               '\'{version}\', \'{report_to}\')"')
+               '\'{version}\', \'{report_to}\', \'{in_folder}\' )"')
 
         report_to = title.replace(" ", "")
         if args["report_to"] is not None:
             report_to = args["report_to"]
 
+        folder = ''
+        for p in parms:
+            if p.name == 'in_folder':
+                folder = p.value
+
         fmtcmd = cmd.format(run_path=run_tmp_path, out_path=out_path,
                             project=title, calibration=title,
-                            author=author, version=version, report_to=report_to)
+                            author=author, version=version,
+                            report_to=report_to, in_folder=folder)
 
         joblist = []
         if concurrency.get("parameter", None) is None:
@@ -740,6 +749,7 @@ def run():
                     break
 
             for cnum, cval in enumerate(cvals):
+                show_title = cnum == 0
                 # Job is not final if there are dependent notebooks
                 jobid = concurrent_run(run_tmp_path, nb, notebook, args,
                                        cvar, [cval, ] if not isinstance(
@@ -747,7 +757,8 @@ def run():
                                        cnum == len(list(cvals)) -
                                        1 and len(dep_notebooks) == 0,
                                        joblist, fmtcmd,
-                                       cluster_cores=cluster_cores, sequential=sequential, priority=priority)
+                                       cluster_cores=cluster_cores, sequential=sequential, priority=priority,
+                                       show_title=show_title)
                 joblist.append(jobid)
 
 
diff --git a/xfel_calibrate/notebooks.py b/xfel_calibrate/notebooks.py
index b23925021f593aceb15400f087b5b36afd03773c..b123714a3a13cbdf9c3a5510f269deb86b58a7d7 100644
--- a/xfel_calibrate/notebooks.py
+++ b/xfel_calibrate/notebooks.py
@@ -44,6 +44,7 @@ notebooks = {
              "LPD": {
                        "DARK": {
                                "notebook": "notebooks/LPD/LPDChar_Darks_NBC.ipynb",
+                               "dep_notebooks": ["notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb"],
                                "concurrency": {"parameter": "modules",
                                                "default concurrency": list(range(16)),
                                                "cluster cores": 8},