From ec2e71cb0125db1e7a1919ee3ab3b1c890069324 Mon Sep 17 00:00:00 2001
From: Steffen Hauf <steffen.hauf@xfel.eu>
Date: Thu, 27 Jun 2019 17:49:28 +0200
Subject: [PATCH] Propagate backlog of FastCCD notebook related changes from
 production system as of 06/2019

---
 .../Characterize_Darks_FastCCD_NBC.ipynb      |  729 ------
 ...haracterize_Darks_NewDAQ_FastCCD_NBC.ipynb |  150 +-
 .../Characterize_FlatFields_FastCCD_NBC.ipynb | 1975 -----------------
 ...terize_FlatFields_NewDAQ_FastCCD_NBC.ipynb | 1542 -------------
 ...orrectionNotebook_NewDAQ_FastCCD_NBC.ipynb |  768 +++----
 xfel_calibrate/notebooks.py                   |    6 -
 6 files changed, 471 insertions(+), 4699 deletions(-)
 delete mode 100644 notebooks/FastCCD/Characterize_Darks_FastCCD_NBC.ipynb
 delete mode 100644 notebooks/FastCCD/Characterize_FlatFields_FastCCD_NBC.ipynb
 delete mode 100644 notebooks/FastCCD/Characterize_FlatFields_NewDAQ_FastCCD_NBC.ipynb

diff --git a/notebooks/FastCCD/Characterize_Darks_FastCCD_NBC.ipynb b/notebooks/FastCCD/Characterize_Darks_FastCCD_NBC.ipynb
deleted file mode 100644
index ea4675d6e..000000000
--- a/notebooks/FastCCD/Characterize_Darks_FastCCD_NBC.ipynb
+++ /dev/null
@@ -1,729 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# FastCCD Dark Characterization\n",
-    "\n",
-    "Author: I. Klačková, Version 1.0\n",
-    "\n",
-    "The following notebook provides dark image analysis of the FastCCD detector.\n",
-    "\n",
-    "Dark characterization evaluates offset and noise of the detector and gives information about bad pixels. Resulting maps are saved as .h5 files for a latter use."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "#files with dark images\n",
-    "dark_frames = \"/gpfs/exfel/data/user/klackova/FCCDdata/XFEL/20171120/Dark/Exptimes/50ms/Dark_sameRemoved.hdf5\"\n",
-    "out_folder = '/gpfs/exfel/data/user/klackova/FCCDdata/test' #output folder\n",
-    "number_dark_frames = 100 #number of images to be used, if set to 0 all available images are used\n",
-    "cluster_profile = \"noDB\" #ipcluster profile to use\n",
-    "operation_mode = \"FS\" #or \"FF\". FS stands for frame-store and FF for full-frame opeartion\n",
-    "sigmaNoise = 5. #Pixel exceeding 'sigmaNoise' * noise value in that pixel will be masked"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import XFELDetAna.xfelprofiler as xprof\n",
-    "\n",
-    "profiler = xprof.Profiler()\n",
-    "profiler.disable()\n",
-    "from XFELDetAna.util import env\n",
-    "env.iprofile = cluster_profile\n",
-    "\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "from XFELDetAna import xfelpycaltools as xcal\n",
-    "from XFELDetAna import xfelpyanatools as xana\n",
-    "from XFELDetAna.plotting.util import prettyPlotting\n",
-    "prettyPlotting=True\n",
-    "from XFELDetAna.xfelreaders import ChunkReader, fastccdreader\n",
-    "\n",
-    "import numpy as np\n",
-    "import h5py\n",
-    "import matplotlib.pyplot as plt\n",
-    "from iminuit import Minuit\n",
-    "\n",
-    "import time\n",
-    "import copy\n",
-    "\n",
-    "from prettytable import PrettyTable\n",
-    "\n",
-    "%matplotlib inline\n",
-    "\n",
-    "def nImagesOrLimit(nImages, limit):\n",
-    "    if limit == 0:\n",
-    "        return nImages\n",
-    "    else:\n",
-    "        return min(nImages, limit)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "if operation_mode == \"FS\":\n",
-    "    x = 960 # rows of the FastCCD to analyze in FS mode \n",
-    "    y = 960 # columns of the FastCCD to analyze in FS mode \n",
-    "    print('\\nYou are analyzing data in FS mode.')\n",
-    "else:\n",
-    "    x = 1920 # rows of the FastCCD to analyze in FF mode \n",
-    "    y = 960 # columns of the FastCCD to analyze in FF mode\n",
-    "    print('\\nYou are analyzing data in FF mode.')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "filename = dark_frames\n",
-    "sensorSize = [x, y]\n",
-    "chunkSize = 100 #Number of images to read per chunk\n",
-    "#Sensor area will be analysed according to blocksize\n",
-    "blockSize = [sensorSize[0]//4, sensorSize[1]//4] \n",
-    "xcal.defaultBlockSize = blockSize\n",
-    "cpuCores = 8 #Specifies the number of running cpu cores\n",
-    "memoryCells = 1 #FastCCD has 1 memory cell\n",
-    "#Specifies total number of images to proceed\n",
-    "nImages = fastccdreader.getDataSize(filename, '/Dataset')[2] \n",
-    "nImages = nImagesOrLimit(nImages, number_dark_frames)\n",
-    "print(\"\\nNumber of dark images to analyze: \",nImages)\n",
-    "commonModeBlockSize = blockSize\n",
-    "commonModeAxisR = 'row'#Axis along which common mode will be calculated\n",
-    "run_parallel = True\n",
-    "profile = False"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "reader = ChunkReader(filename, fastccdreader.readData, \n",
-    "                     nImages, chunkSize, \n",
-    "                     path = \"/Dataset\", \n",
-    "                     pixels_x = sensorSize[0],\n",
-    "                     pixels_y = sensorSize[1],)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "noiseCal = xcal.NoiseCalculator(sensorSize, memoryCells, \n",
-    "                                cores=cpuCores,\n",
-    "                               runParallel=run_parallel)\n",
-    "histCalRaw = xcal.HistogramCalculator(sensorSize, bins=1000, \n",
-    "                                      range=[0, 10000], parallel=False, \n",
-    "                                      memoryCells=memoryCells, cores=cpuCores)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### First Iteration"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Characterization of dark images with purpose to create dark maps (offset, noise and bad pixel maps) is an iterative process. Firstly, initial offset and noise maps are produced from raw dark data."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "for data in reader.readChunks():\n",
-    "   \n",
-    "    histCalRaw.fill(data)\n",
-    "    #Filling calculators with data\n",
-    "    noiseCal.fill(data)\n",
-    "          \n",
-    "offsetMap = noiseCal.getOffset() #Produce offset map\n",
-    "noiseMap = noiseCal.get() #Produce noise map\n",
-    "noiseCal.reset() #Reset noise calculator\n",
-    "print(\"Initial maps were created\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "#**************OFFSET MAP HISTOGRAM***********#\n",
-    "ho,co = np.histogram(offsetMap.flatten(), bins=700)\n",
-    "\n",
-    "do = {'x': co[:-1],\n",
-    "     'y': ho,\n",
-    "     'y_err': np.sqrt(ho[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(do, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Offset (ADU)', \n",
-    "                      y_label=\"Counts\", y_log=True,\n",
-    "                      )\n",
-    "                      \n",
-    "\n",
-    "#*****NOISE MAP HISTOGRAM FROM THE OFFSET CORRECTED DATA*******#\n",
-    "hn,cn = np.histogram(noiseMap.flatten(), bins=200)\n",
-    "\n",
-    "dn = {'x': cn[:-1],\n",
-    "     'y': hn,\n",
-    "     'y_err': np.sqrt(hn[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(dn, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Noise (ADU)', \n",
-    "                      y_label=\"Counts\", \n",
-    "                      y_log=True)\n",
-    "\n",
-    "\n",
-    "#**************HEAT MAPS*******************#\n",
-    "fig = xana.heatmapPlot(offsetMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Offset (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmax=1.2*np.mean(offsetMap))\n",
-    "\n",
-    "fig = xana.heatmapPlot(noiseMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Noise (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmax=2*np.mean(noiseMap))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "offsetCal = xcal.OffsetCalculator(sensorSize, memoryCells, cores=cpuCores, \n",
-    "                                 runParallel=run_parallel)\n",
-    "offsetCorrection = xcal.OffsetCorrection(sensorSize, offsetMap, \n",
-    "                                         nCells = memoryCells,\n",
-    "                                         cores=cpuCores, gains=None,\n",
-    "                                        runParallel=run_parallel)\n",
-    "\n",
-    "cmCorrectionR = xcal.CommonModeCorrection(sensorSize, \n",
-    "                                         commonModeBlockSize, \n",
-    "                                         commonModeAxisR,\n",
-    "                                         nCells = memoryCells, \n",
-    "                                         noiseMap = noiseMap,\n",
-    "                                        runParallel=run_parallel,\n",
-    "                                        stats=True, gains=None)\n",
-    "\n",
-    "histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=4000, \n",
-    "                                            range=[-1000, 3000],\n",
-    "                                            memoryCells=memoryCells, \n",
-    "                                            cores=cpuCores, gains=None)\n",
-    "histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=2000, \n",
-    "                                              range=[-1000, 1000],\n",
-    "                                              memoryCells=memoryCells, \n",
-    "                                              cores=cpuCores, gains=None)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Second Iteration"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The initial maps are then used for corrections in the second iteration. In this step also common mode correction is applied. The corrected data is used to calculate threshold for exlusion of events. It is calculated as $5*\\sigma$ noise, where $\\sigma$ is estimated as median of noise."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "for data in reader.readChunks():\n",
-    "    \n",
-    "    offsetCal.fill(data)\n",
-    "    data = offsetCorrection.correct(data)\n",
-    "    histCalCorrected.fill(data)\n",
-    "    data = cmCorrectionR.correct(data)\n",
-    "    histCalCMCorrected.fill(data)\n",
-    "    noiseCal.fill(data)  \n",
-    "    \n",
-    "offsetMap = offsetCal.get() #Produce offset map\n",
-    "noiseMap = noiseCal.get() #Produce noise map after offset correction\n",
-    "noiseCal.reset()\n",
-    "offsetCal.reset()\n",
-    "\n",
-    "event_threshold = sigmaNoise*np.median(noiseMap)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "histCalCorrected.reset()\n",
-    "cmCorrectionR.reset()\n",
-    "histCalCMCorrected.reset()\n",
-    "\n",
-    "cmCorrectionR2 = xcal.CommonModeCorrection(sensorSize, \n",
-    "                                         commonModeBlockSize, \n",
-    "                                         commonModeAxisR,\n",
-    "                                         nCells = memoryCells, \n",
-    "                                         noiseMap = noiseMap,\n",
-    "                                        runParallel=run_parallel,\n",
-    "                                        stats=True, gains=None)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Third Iteration "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In the next step, events are excluded and aditionally bad pixels are evaluated so the bad pixel map can be calculated for latter use in a flat-field analysis."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "for data in reader.readChunks():\n",
-    "    \n",
-    "    d2 = offsetCorrection.correct(copy.copy(data))\n",
-    "    d2 = cmCorrectionR2.correct(d2)\n",
-    "    data[d2 > event_threshold] = np.nan\n",
-    "    \n",
-    "    offsetCal.fill(data)\n",
-    "    data = offsetCorrection.correct(data)\n",
-    "    histCalCorrected.fill(data)\n",
-    "    \n",
-    "    data = np.ma.MaskedArray(data, np.isnan(data))\n",
-    "    \n",
-    "    data = cmCorrectionR.correct(data)\n",
-    "    histCalCMCorrected.fill(data)\n",
-    "    \n",
-    "    noiseCal.fill(data)\n",
-    "    \n",
-    "    \n",
-    "offsetMap = offsetCal.get() #Produce offset map\n",
-    "noiseMap = noiseCal.get() #Produce noise map\n",
-    "noiseCal.reset()\n",
-    "offsetCal.reset()\n",
-    "\n",
-    "badPixelMap = xcal.badPixelMaskFromNoiseAndOffset(offsetMap, noiseMap, hotThreshold=4800, coldThreshold=3000, \n",
-    "                                                 nSigmaNoisy=sigmaNoise)\n",
-    "offsetCal.setBadPixelMask(badPixelMap)\n",
-    "noiseCal.setBadPixelMask(badPixelMap)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "histCalCorrected.reset()\n",
-    "cmCorrectionR.reset()\n",
-    "cmCorrectionR2.reset()\n",
-    "histCalCMCorrected.reset()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Final Iteration "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In the last iteration also bad pixels are excluded from offset and noise map and final maps are produced."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "for data in reader.readChunks():\n",
-    "    \n",
-    "    d2 = offsetCorrection.correct(copy.copy(data))\n",
-    "    d2 = cmCorrectionR2.correct(d2)\n",
-    "    data[d2 > event_threshold] = np.nan #creating mask for masking cosmics \n",
-    "    \n",
-    "    offsetCal.fill(data)\n",
-    "    data = offsetCorrection.correct(data)\n",
-    "    histCalCorrected.fill(data)\n",
-    "    \n",
-    "    data = np.ma.MaskedArray(data, np.isnan(data)) #masking cosmics\n",
-    "    \n",
-    "    data = cmCorrectionR.correct(data)\n",
-    "    histCalCMCorrected.fill(data)\n",
-    "    \n",
-    "    noiseCal.fill(data)\n",
-    "    \n",
-    "    \n",
-    "offsetMap = offsetCal.get() #Produce offset map\n",
-    "noiseMap = noiseCal.get() #Produce noise map\n",
-    "noiseCal.reset()\n",
-    "offsetCal.reset()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Plotting the Results"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "rawHist = histCalRaw.get(cumulatative = True)\n",
-    "\n",
-    "fig = plt.figure(figsize=(5,5))\n",
-    "ax = fig.add_subplot(111)\n",
-    "xana.histPlot(ax, rawHist)\n",
-    "t = ax.set_xlabel(\"Raw signal (ADU)\")\n",
-    "t = ax.set_ylabel(\"Counts\")\n",
-    "l = ax.set_xlim(np.nanmin(offsetMap),np.nanmax(offsetMap))\n",
-    "\n",
-    "\n",
-    "#***************COMPARISON HISTOGRAM*************#\n",
-    "h, e, c ,s = histCalCorrected.get(cumulatative=True)\n",
-    "hm, em, cm ,sm = histCalCMCorrected.get(cumulatative=True)\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "     'y': h,\n",
-    "     'y_err': np.sqrt(h[:]),\n",
-    "     'drawstyle': 'steps-mid',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     'errorstyle': 'bars',\n",
-    "     'label': 'offset corr' \n",
-    "     },\n",
-    "    {'x': cm,\n",
-    "     'y': hm,\n",
-    "     'y_err': np.sqrt(hm[:]),\n",
-    "     'drawstyle': 'steps-mid',\n",
-    "     'color': 'red',\n",
-    "     'errorstyle': 'bars',\n",
-    "     'ecolor': 'crimson',\n",
-    "     'label': 'CM corr'\n",
-    "     }]\n",
-    "\n",
-    "fig = xana.simplePlot(d, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Signal (ADU)', \n",
-    "                      y_label=\"Counts\", x_range=(-20,20), y_log=False, legend='top-left-frame-1col', \n",
-    "                     title=\"histograms\")\n",
-    "\n",
-    "cmMode = cmCorrectionR.get()\n",
-    "fig = plt.figure(figsize=(12,4))\n",
-    "ax = fig.add_subplot(121)\n",
-    "xana.histPlot(ax, cmMode, bins=50, plot_errors=True)\n",
-    "t = ax.set_xlabel(\"Common mode distribution - Signal (ADU)\")\n",
-    "t = ax.set_ylabel(\"Counts\")\n",
-    "\n",
-    "\n",
-    "#**************OFFSET MAP HISTOGRAM***********#\n",
-    "ho,co = np.histogram(offsetMap.flatten(), bins=700)\n",
-    "\n",
-    "do = {'x': co[:-1],\n",
-    "     'y': ho,\n",
-    "     'y_err': np.sqrt(ho[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(do, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Offset (ADU)', \n",
-    "                      y_label=\"Counts\", y_log=True,\n",
-    "                      )\n",
-    "                      \n",
-    "\n",
-    "#*****NOISE MAP HISTOGRAM FROM THE OFFSET CORRECTED DATA*******#\n",
-    "hn,cn = np.histogram(noiseMap.flatten(), bins=200)\n",
-    "\n",
-    "dn = {'x': cn[:-1],\n",
-    "     'y': hn,\n",
-    "     'y_err': np.sqrt(hn[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(dn, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Noise (ADU)', \n",
-    "                      y_label=\"Counts\", \n",
-    "                      y_log=True)\n",
-    "\n",
-    "\n",
-    "#**************HEAT MAPS*******************#\n",
-    "fig = xana.heatmapPlot(offsetMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Offset (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x))\n",
-    "\n",
-    "fig = xana.heatmapPlot(noiseMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Noise (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x),\n",
-    "                      )\n",
-    "\n",
-    "fig = xana.heatmapPlot(badPixelMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Bad pixels',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmax=1\n",
-    "                      )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "num_bad_pixels = np.count_nonzero(badPixelMap)\n",
-    "num_all_pixels = x*y\n",
-    "percentage_bad_pixels = num_bad_pixels*100/num_all_pixels\n",
-    "print(\"Number of bad pixels: {:0.0f}, i.e. {:0.2f}% of all pixels\".format(num_bad_pixels, percentage_bad_pixels))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "#**************RMS CALCULATION*******************#\n",
-    "\n",
-    "#all\n",
-    "rms = np.mean(noiseMap)*6.1\n",
-    "stdRms = np.std(noiseMap)*6.1\n",
-    "print(\"RMS noise all = {:0.2f} ADU\".format(rms/6.1))\n",
-    "print(\"RMS noise all = {:0.2f} e\".format(rms))\n",
-    "print('standard deviation of rms all ={:0.2f} e'.format(stdRms))\n",
-    "print()\n",
-    "\n",
-    "#lower hem\n",
-    "rms = np.mean(noiseMap[:x//2,:])*6.1\n",
-    "stdRms = np.std(noiseMap[:x//2,:])*6.1\n",
-    "print(\"RMS noise LH = {:0.2f} ADU\".format(rms/6.1))\n",
-    "print(\"RMS noise LH = {:0.2f} e\".format(rms))\n",
-    "print('standard deviation of rms LH = {:0.2f} e'.format(stdRms))\n",
-    "print()\n",
-    "\n",
-    "\n",
-    "#upper hem\n",
-    "rms = np.mean(noiseMap[x//2:,:])*6.1\n",
-    "stdRms = np.std(noiseMap[x//2:,:])*6.1\n",
-    "print(\"RMS noise UH = {:0.2f} ADU\".format(rms/6.1))\n",
-    "print(\"RMS noise UH = {:0.2f} e\".format(rms))\n",
-    "print('standard deviation of rms UH = {:0.2f} e'.format(stdRms))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Saving Maps"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Here, the calculated maps are saved to .h5 file named **DarkMaps.h5**. Each map type is accessible in dataset structure via keyword: *offsetMap*, *noiseMap* or *badPixelMap*."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "#Function for creating .h5 files\n",
-    "import os\n",
-    "\n",
-    "def createHdf5(path, filename):\n",
-    "    completeName = os.path.join(path, filename)\n",
-    "    f = h5py.File(completeName, \"w\")\n",
-    "    f.close()\n",
-    "\n",
-    "def saveDataset(path ,filename, Name, SavingArray):\n",
-    "    completeName = os.path.join(path, filename)\n",
-    "    f = h5py.File(completeName, \"a\")\n",
-    "    dset = f.require_dataset(Name, SavingArray.shape, dtype='f')\n",
-    "    dset[...] = SavingArray\n",
-    "    f.close()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "path = out_folder\n",
-    "name = ('DarkMaps.h5')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "createHdf5(path, name) #Creation of .h5 file"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "#Saving maps to .h5\n",
-    "saveDataset(path, name,  '/offsetMap', offsetMap)\n",
-    "saveDataset(path, name,  '/noiseMap', noiseMap)\n",
-    "saveDataset(path, name, '/badPixelMap', badPixelMap)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.6"
-  },
-  "latex_envs": {
-   "LaTeX_envs_menu_present": true,
-   "autocomplete": true,
-   "bibliofile": "biblio.bib",
-   "cite_by": "apalike",
-   "current_citInitial": 1,
-   "eqLabelWithNumbers": true,
-   "eqNumInitial": 1,
-   "hotkeys": {
-    "equation": "Ctrl-E",
-    "itemize": "Ctrl-I"
-   },
-   "labels_anchors": false,
-   "latex_user_defs": false,
-   "report_style_numbering": false,
-   "user_envs_cfg": false
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
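For context on the notebook removed above: its core dark-characterization logic (per-pixel offset and noise maps, an event threshold of sigmaNoise times the median noise, and a simple bad-pixel mask) can be summarized in plain NumPy. The sketch below is illustrative only; it is not part of this patch or of the XFELDetAna API, and the function name, thresholds and array layout are assumptions made for illustration.

    import numpy as np

    def dark_maps(frames, sigma_noise=5.0, hot_threshold=4800, cold_threshold=3000):
        """Illustrative sketch (not the notebook's actual code).

        `frames` is assumed to be a (rows, columns, n_images) array of raw darks.
        """
        # first pass: offset and noise from the raw darks
        offset_map = np.nanmean(frames, axis=2)
        noise_map = np.nanstd(frames, axis=2)
        # events (e.g. cosmics) are excluded above sigma_noise * median noise
        event_threshold = sigma_noise * np.nanmedian(noise_map)
        corrected = frames - offset_map[:, :, None]
        cleaned = np.where(corrected > event_threshold, np.nan, frames)
        # second pass: recompute the maps with events masked out
        offset_map = np.nanmean(cleaned, axis=2)
        noise_map = np.nanstd(cleaned, axis=2)
        # bad pixels: too hot, too cold, or too noisy
        bad_pixel_map = ((offset_map > hot_threshold) | (offset_map < cold_threshold) |
                         (noise_map > sigma_noise * np.nanmedian(noise_map)))
        return offset_map, noise_map, bad_pixel_map
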
diff --git a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC.ipynb b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC.ipynb
index 5ab017b55..d71cd29ae 100644
--- a/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC.ipynb
+++ b/notebooks/FastCCD/Characterize_Darks_NewDAQ_FastCCD_NBC.ipynb
@@ -25,11 +25,10 @@
    },
    "outputs": [],
    "source": [
-    "#files with dark images\n",
     "in_folder = \"/gpfs/exfel/exp/SCS/201930/p900074/raw/\" # input folder, required\n",
     "out_folder = 'gpfs/exfel/data/scratch/haufs/test/' # output folder, required\n",
     "path_template = 'RAW-R{:04d}-DA05-S{{:05d}}.h5' # the template to use to access data\n",
-    "run = 365 # which run to read data from, required\n",
+    "run = 321 # which run to read data from, required\n",
     "number_dark_frames = 0 # number of images to be used, if set to 0 all available images are used\n",
     "cluster_profile = \"noDB\" # ipcluster profile to use\n",
     "operation_mode = \"FF\" #o r \"FF\". FS stands for frame-store and FF for full-frame opeartion\n",
@@ -39,10 +38,13 @@
     "h5path_cntrl = '/RUN/SCS_CDIDET_FCCD2M/DET/FCCD'  # path to control data\n",
     "cal_db_interface = \"tcp://max-exfl016:8020\" # calibration DB interface to use\n",
     "local_output = False # output also in as H5 files\n",
-    "temp_limits = 5\n",
+    "temp_limits = 5 # limits within which temperature is considered the same\n",
     "sequence = 0 # sequence file to use\n",
     "multi_iteration = False # use multiple iterations\n",
-    "use_dir_creation_date = True"
+    "use_dir_creation_date = True # use dir creation date\n",
+    "bad_pixel_offset_sigma = 5. # offset standard deviations above which to consider pixel bad \n",
+    "bad_pixel_noise_sigma = 5. # noise standard deviations above which to consider pixel bad \n",
+    "fix_temperature = 0. # fix temperature to this value in K, set to 0. if to keep the same"
    ]
   },
   {
@@ -185,10 +187,14 @@
     "    integration_time = int(f['{}/acquisitionTime/value'.format(h5path_cntrl)][0])\n",
     "    temperature = np.mean(f[h5path_t])\n",
     "    temperature_k = temperature + 273.15\n",
+    "        \n",
+    "    if fix_temperature != 0.:\n",
+    "        temperature_k = fix_temperature\n",
+    "        print(\"Using fixed temperature\")\n",
     "    print(\"Bias voltage is {} V\".format(bias_voltage))\n",
     "    print(\"Detector gain is set to x{}\".format(det_gain))\n",
     "    print(\"Detector integration time is set to {}\".format(integration_time))\n",
-    "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K\".format(temperature, temperature_k))"
+    "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K\".format(temperature, temperature_k))\n"
    ]
   },
   {
@@ -258,6 +264,7 @@
    "outputs": [],
    "source": [
     "for data in reader.readChunks():\n",
+    "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
     "    dx = np.count_nonzero(data, axis=(0, 1))\n",
     "    data = data[:,:,dx != 0]\n",
     "    histCalRaw.fill(data)\n",
@@ -411,58 +418,67 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T10:56:31.538005Z",
-     "start_time": "2018-12-06T10:56:22.744438Z"
-    },
     "collapsed": false
    },
    "outputs": [],
    "source": [
-    "#**************OFFSET MAP HISTOGRAM***********#\n",
-    "ho,co = np.histogram(offsetMap.flatten(), bins=700)\n",
+    "from cal_tools.enums import BadPixels\n",
+    "bad_pixels = np.zeros(offsetMap.shape, np.uint32)\n",
+    "mnoffset = np.nanmedian(offsetMap)\n",
+    "stdoffset = np.nanstd(offsetMap)\n",
+    "bad_pixels[(offsetMap < mnoffset-bad_pixel_offset_sigma*stdoffset) | \n",
+    "           (offsetMap > mnoffset+bad_pixel_offset_sigma*stdoffset)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n",
+    "\n",
+    "mnnoise = np.nanmedian(noiseMap)\n",
+    "stdnoise = np.nanstd(noiseMap)\n",
+    "bad_pixels[(noiseMap < mnnoise-bad_pixel_noise_sigma*stdnoise) | \n",
+    "           (noiseMap > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
+    "\n",
+    "fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]),\n",
+    "                       x_label='Columns', y_label='Rows',\n",
+    "                       lut_label='Bad Pixel Value (ADU)',\n",
+    "                       x_range=(0,y),\n",
+    "                       y_range=(0,x), vmin=0, vmax=32)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "metadata = ConstantMetaData()\n",
+    "badpix = Constants.CCD(DetectorTypes.fastCCD).BadPixelsDark()\n",
+    "badpix.data = bad_pixels.data\n",
+    "metadata.calibration_constant = badpix\n",
     "\n",
-    "do = {'x': co[:-1],\n",
-    "     'y': ho,\n",
-    "     'y_err': np.sqrt(ho[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     }\n",
+    "# set the operating condition\n",
+    "condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
+    "                                integration_time=integration_time,\n",
+    "                                gain_setting=det_gain,\n",
+    "                                temperature=temperature_k,\n",
+    "                                pixels_x=1934,\n",
+    "                                pixels_y=960)\n",
     "\n",
-    "fig = xana.simplePlot(do, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Offset (ADU)', \n",
-    "                      y_label=\"Counts\", y_log=True,\n",
-    "                      )\n",
-    "                      \n",
+    "for parm in condition.parameters:\n",
+    "    if parm.name == \"Sensor Temperature\":\n",
+    "        parm.lower_deviation = temp_limits\n",
+    "        parm.upper_deviation = temp_limits\n",
     "\n",
-    "#*****NOISE MAP HISTOGRAM FROM THE OFFSET CORRECTED DATA*******#\n",
-    "hn,cn = np.histogram(noiseMap.flatten(), bins=200)\n",
     "\n",
-    "dn = {'x': cn[:-1],\n",
-    "     'y': hn,\n",
-    "     'y_err': np.sqrt(hn[:]),\n",
-    "     'drawstyle': 'bars',\n",
-    "     'color': 'cornflowerblue',\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(dn, figsize='1col', aspect=2, \n",
-    "                      x_label = 'Noise (ADU)', \n",
-    "                      y_label=\"Counts\", \n",
-    "                      y_log=True)\n",
+    "device = Detectors.fastCCD1\n",
     "\n",
     "\n",
-    "#**************HEAT MAPS*******************#\n",
-    "fig = xana.heatmapPlot(offsetMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Offset (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmin=3000, vmax=4500)\n",
+    "metadata.detector_condition = condition\n",
     "\n",
-    "fig = xana.heatmapPlot(noiseMap[:,:,0],\n",
-    "                       x_label='Columns', y_label='Rows',\n",
-    "                       lut_label='Noise (ADU)',\n",
-    "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmax=2*np.mean(noiseMap))"
+    "# specify the a version for this constant\n",
+    "if creation_time is None:\n",
+    "    metadata.calibration_constant_version = Versions.Now(device=device)\n",
+    "else:\n",
+    "    metadata.calibration_constant_version = Versions.Timespan(device=device, start=creation_time)\n",
+    "metadata.send(cal_db_interface)"
    ]
   },
   {
@@ -472,7 +488,47 @@
     "collapsed": true
    },
    "outputs": [],
-   "source": []
+   "source": [
+    "histCalCorr = xcal.HistogramCalculator(sensorSize, bins=200, \n",
+    "                                      range=[-200, 200], parallel=False, \n",
+    "                                      memoryCells=memoryCells, \n",
+    "                                      cores=cpuCores, blockSize=blockSize)\n",
+    "\n",
+    "\n",
+    "for data in reader.readChunks():\n",
+    "    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)\n",
+    "    data -= offsetMap.data\n",
+    "    histCalCorr.fill(data)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ho,eo,co,so = histCalCorr.get()\n",
+    "\n",
+    "\n",
+    "d = [{'x': co,\n",
+    "      'y': ho,\n",
+    "      'y_err': np.sqrt(ho[:]),\n",
+    "      'drawstyle': 'steps-mid',\n",
+    "      'errorstyle': 'bars',\n",
+    "      'errorcoarsing': 2,\n",
+    "      'label': 'Offset corr.'\n",
+    "     },\n",
+    "    \n",
+    "     ]\n",
+    "     \n",
+    "\n",
+    "fig = xana.simplePlot(d, aspect=1, x_label='Energy(ADU)', \n",
+    "                      y_label='Number of occurrences', figsize='2col',\n",
+    "                      y_log=True, x_range=(-50,500),\n",
+    "                      legend='top-center-frame-2col')"
+   ]
   },
   {
    "cell_type": "code",
diff --git a/notebooks/FastCCD/Characterize_FlatFields_FastCCD_NBC.ipynb b/notebooks/FastCCD/Characterize_FlatFields_FastCCD_NBC.ipynb
deleted file mode 100644
index 529c007d5..000000000
--- a/notebooks/FastCCD/Characterize_FlatFields_FastCCD_NBC.ipynb
+++ /dev/null
@@ -1,1975 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# FastCCD Flat-Field Characterization ##\n",
-    "\n",
-    "Author: I. Klačková, Version 1.0\n",
-    "\n",
-    "The following notebook provides flat-field image analysis of the FastCCD detector.\n",
-    "\n",
-    "As a prerequisite, dark maps are required for the analysis to be performed. These can be obtained by running **FastCCD Dark Characterization**.\n",
-    "\n",
-    "Flat-field characterization provides analysis of a detector gain and other CCD-related effects. "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "dark_maps = '/gpfs/exfel/data/user/klackova/FCCDdata/XFEL/20180111/Dark/DarkMaps.h5' #file containing dark maps\n",
-    "#files with flat-field images\n",
-    "flatField_frames = '/gpfs/exfel/data/user/klackova/FCCDdata/XFEL/20180111/Fe55/Fe55.hdf5'\n",
-    "output_folder = '/gpfs/exfel/data/user/klackova/FCCDdata/XFEL/20180111/results/' #output folder\n",
-    "number_flatField_frames = 1000\n",
-    "cluster_profile = \"noDB\" #ipcluster profile to use\n",
-    "operation_mode = \"FF\" #or \"FF\". FS stands for frame-store and FF for full-frame opeartion\n",
-    "split_evt_primary_threshold = 7. # primary threshold for split event classification in terms of n sigma noise\n",
-    "split_evt_secondary_threshold = 4. # secondary threshold for split event classification in terms of n sigma noise\n",
-    "split_evt_mip_threshold = 1000. # MIP threshold for event classification\n",
-    "tag_101_LH = 0 #if 1 tagging first singles for upper hemisphere, if 0 then are not evaluated\n",
-    "tag_101_UH = 0 #if 2 tagging first singles for upper hemisphere, if 0 then are not evaluated\n",
-    "cti_Axis_LH = 1 #CTI axis index for lower hemisphere\n",
-    "cti_Axis_UH = 0 #CTI axis indes for upper hemisphere\n",
-    "cti_lim_low = 220 #lower limit in ADU for events used in CTI calculator\n",
-    "cti_lim_high = 350 #upper limit in ADU for events used in CTI calculator\n",
-    "local_output = False # set to trou if local output is to be provided to output_folder\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020\" # calibration DB interface to use\n",
-    "bias_voltage = 0\n",
-    "integration_time = 1\n",
-    "gain_setting = 8"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import XFELDetAna.xfelprofiler as xprof\n",
-    "\n",
-    "profiler = xprof.Profiler()\n",
-    "profiler.disable()\n",
-    "from XFELDetAna.util import env\n",
-    "env.iprofile = cluster_profile\n",
-    "\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "from XFELDetAna import xfelpycaltools as xcal\n",
-    "from XFELDetAna import xfelpyanatools as xana\n",
-    "from XFELDetAna.plotting.util import prettyPlotting\n",
-    "prettyPlotting=True\n",
-    "from XFELDetAna.xfelreaders import ChunkReader, fastccdreader\n",
-    "\n",
-    "import numpy as np\n",
-    "import h5py\n",
-    "import matplotlib.pyplot as plt\n",
-    "from iminuit import Minuit\n",
-    "\n",
-    "import time\n",
-    "import copy\n",
-    "\n",
-    "from prettytable import PrettyTable\n",
-    "\n",
-    "%matplotlib inline\n",
-    "\n",
-    "def nImagesOrLimit(nImages, limit):\n",
-    "    if limit == 0:\n",
-    "        return nImages\n",
-    "    else:\n",
-    "        return min(nImages, limit)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "if operation_mode == \"FS\":\n",
-    "    x = 960 # rows of the FastCCD to analyze in FS mode \n",
-    "    y = 960 # columns of the FastCCD to analyze in FS mode \n",
-    "    print('\\nYou are analyzing data in FS mode.')\n",
-    "else:\n",
-    "    x = 1920 # rows of the FastCCD to analyze in FF mode \n",
-    "    y = 960 # columns of the FastCCD to analyze in FF mode\n",
-    "    print('\\nYou are analyzing data in FF mode.')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "filename = flatField_frames\n",
-    "sensorSize = [x, y]\n",
-    "chunkSize = 200 #Number of images to read per chunk\n",
-    "blockSize = [sensorSize[0]//4, sensorSize[1]//4] #Sensor area will be analysed according to blocksize\n",
-    "xcal.defaultBlockSize = blockSize\n",
-    "cpuCores = 16 #Specifies the number of running cpu cores\n",
-    "memoryCells = 1 #FastCCD has 1 memory cell\n",
-    "#Specifies total number of images to proceed\n",
-    "nImages = fastccdreader.getDataSize(filename, '/Dataset')[2] \n",
-    "nImages = nImagesOrLimit(nImages, number_flatField_frames)\n",
-    "print(\"\\nNumber of flat-field images to analyze: \",nImages)\n",
-    "\n",
-    "commonModeBlockSize = blockSize\n",
-    "commonModeAxisR = 'row'#Axis along which common mode will be calculated\n",
-    "run_parallel = True\n",
-    "profile = False\n",
-    "\n",
-    "reader = ChunkReader(filename, fastccdreader.readData, \n",
-    "                     nImages, chunkSize, \n",
-    "                     path = \"/Dataset\", \n",
-    "                     pixels_x = x,\n",
-    "                     pixels_y = y)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As a first step, dark maps has to be loaded."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "f = h5py.File(dark_maps, 'r')\n",
-    "offsetMap = np.array(f[\"/offsetMap\"])\n",
-    "noiseMap = np.array(f[\"/noiseMap\"])\n",
-    "badPixelMap = np.array(f[\"/badPixelMap\"])\n",
-    "f.close()\n",
-    "\n",
-    "offsetMap = np.ma.MaskedArray(offsetMap, badPixelMap)\n",
-    "noiseMap = np.ma.MaskedArray(noiseMap, badPixelMap)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "#************************Calculators************************#\n",
-    "\n",
-    "offsetCorrection = xcal.OffsetCorrection([x, y], \n",
-    "                                          offsetMap, \n",
-    "                                          nCells = memoryCells, \n",
-    "                                          cores=cpuCores, gains=None,\n",
-    "                                        blockSize=blockSize)\n",
-    "\n",
-    "cmCorrectionLH = xcal.CommonModeCorrection([x//2, y], \n",
-    "                                         commonModeBlockSize, \n",
-    "                                         commonModeAxisR,\n",
-    "                                         nCells = memoryCells, \n",
-    "                                         noiseMap = noiseMap,\n",
-    "                                        runParallel=run_parallel,\n",
-    "                                        stats=False)\n",
-    "\n",
-    "cmCorrectionUH = xcal.CommonModeCorrection([x//2, y], \n",
-    "                                         commonModeBlockSize, \n",
-    "                                         commonModeAxisR,\n",
-    "                                         nCells = memoryCells, \n",
-    "                                         noiseMap = noiseMap,\n",
-    "                                        runParallel=run_parallel,\n",
-    "                                        stats=False)\n",
-    "\n",
-    "\n",
-    "patternClassifierLH = xcal.PatternClassifier([x//2, y], \n",
-    "                                              noiseMap[:x//2, :], \n",
-    "                                              split_evt_primary_threshold, \n",
-    "                                             split_evt_secondary_threshold,\n",
-    "                                             split_evt_mip_threshold,\n",
-    "                                              tagFirstSingles = tag_101_LH, \n",
-    "                                              nCells=memoryCells, \n",
-    "                                              cores=cpuCores, \n",
-    "                                              allowElongated = False,\n",
-    "                                          blockSize=blockSize,\n",
-    "                                            runParallel=run_parallel)\n",
-    "\n",
-    "patternClassifierUH = xcal.PatternClassifier([x//2, y], \n",
-    "                                              noiseMap[x//2:, :], \n",
-    "                                              split_evt_primary_threshold, \n",
-    "                                              split_evt_secondary_threshold,\n",
-    "                                              split_evt_mip_threshold,\n",
-    "                                              tagFirstSingles = tag_101_UH, \n",
-    "                                              nCells=memoryCells, \n",
-    "                                              cores=cpuCores, \n",
-    "                                              allowElongated = False,\n",
-    "                                          blockSize=blockSize,\n",
-    "                                            runParallel=run_parallel)\n",
-    "\n",
-    "patternSelectorLH = xcal.PatternSelector([x//2, y], \n",
-    "                                          selectionList = [100, 200, 201, 202, 203,\n",
-    "                                                           300, 301, 302, 303,\n",
-    "                                                           400, 401, 402, 403],\n",
-    "                                      blockSize=blockSize, runParallel=run_parallel)\n",
-    "patternSelectorUH = xcal.PatternSelector([x//2, y], \n",
-    "                                          selectionList = [100, 200, 201, 202, 203,\n",
-    "                                                          300, 301, 302, 303,\n",
-    "                                                           400, 401, 402, 403],\n",
-    "                                      blockSize=blockSize, runParallel=run_parallel)\n",
-    "\n",
-    "ctiCalLH = xcal.CTICalculator([x//2, y], \n",
-    "                               noiseMap[:x//2, :], \n",
-    "                               axis=cti_Axis_LH, \n",
-    "                               nCells=memoryCells,\n",
-    "                               cores=cpuCores,\n",
-    "                               absLow=cti_lim_low, absHigh=cti_lim_high, gains=None,\n",
-    "                           blockSize=blockSize, runParallel=run_parallel)\n",
-    "\n",
-    "ctiCalUH = xcal.CTICalculator([x//2, y], \n",
-    "                               noiseMap[x//2:, :], \n",
-    "                               axis=cti_Axis_UH, \n",
-    "                               nCells=memoryCells,\n",
-    "                               cores=cpuCores,\n",
-    "                               absLow=cti_lim_low, absHigh=cti_lim_high, gains=None,\n",
-    "                           blockSize=blockSize, runParallel=run_parallel)\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "patternClassifierLH._imagesPerChunk = chunkSize//2\n",
-    "patternClassifierUH._imagesPerChunk = chunkSize//2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "#*********Setting bad pixel mask***************#\n",
-    "\n",
-    "patternClassifierLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternClassifierUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "patternSelectorLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternSelectorUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "ctiCalLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "ctiCalUH.setBadPixelMask(badPixelMap[x//2:, :])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "#*****************Histogram Calculators******************#\n",
-    "\n",
-    "histCalOffsetCor = xcal.HistogramCalculator([x, y], \n",
-    "                                            bins=1000, \n",
-    "                                            range=[0, 1000],\n",
-    "                                            nCells=memoryCells, \n",
-    "                                            cores=cpuCores,\n",
-    "                                           blockSize=blockSize)\n",
-    "\n",
-    "histCalCMCor = xcal.HistogramCalculator([x, y], bins=1000, \n",
-    "                                        range=[0, 1000], \n",
-    "                                        cores=cpuCores,\n",
-    "                                        memoryCells=memoryCells)\n",
-    "\n",
-    "histCalPatternCorLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                                bins=1000, \n",
-    "                                                range=[0, 1000],\n",
-    "                                                nCells=memoryCells, \n",
-    "                                                cores=cpuCores,\n",
-    "                                            blockSize=blockSize)\n",
-    "\n",
-    "histCalPatternCorUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                                bins=1000, \n",
-    "                                                range=[0, 1000],\n",
-    "                                                nCells=memoryCells, \n",
-    "                                                cores=cpuCores,\n",
-    "                                            blockSize=blockSize)\n",
-    "\n",
-    "histCalSinglesLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalSinglesUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalDoublesLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalDoublesUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalTriplesLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "histCalTriplesUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "\n",
-    "histCalQuadsLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "histCalQuadsUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The very first step when analyzing illuminated data is correction for the dark effects (offset, common mode).  \n",
-    "\n",
-    "The next step is correction for charge sharing effects between neighbouring pixels. PatternClassifier performs classification according to event patterns with different multiplicities, defined as the number of pattern consituents. Valid patterns can range from one to four constituents. For the classification, event thresholds are used. There are three types of thresholds: a *primary threshold*, a *secondary threshold* and an *upper threshold*. \n",
-    "\n",
-    "Primary and secondary thresholds are given in $n\\times\\sigma$, where $\\sigma$ is the noise value in the pixel. First, the values in all pixels are checked against primary threshold. Values exceeding $prim.threshold\\times\\sigma$ are considered as events. Adjacent piexls (to pixel contaning photon event) are checked against secondary threshold, if they exceed value $sec.threshold\\times\\sigma$ they are considered one pattern. The upper event threhsold corresponds to a summed value of all pattern constituents. If the summed value is below the upper threshold, the event pattern is considered valid. \n",
-    "\n",
-    "Next option is tracking first single events, which are used for CTI analysis. These are single pixel events nearest to the readout node. As there are readout ports on the both sides of the sensor, lower and upper halves (hemispheres) are analysed separately. For the FastCCD two options are used for tracking: for the lower half $tagFirstSingles = 1$ and for the upper half $tagFirstSingles = 2$. If value is set to 0, first singles are not evaluated.\n",
-    "\n",
-    "PatternClassifier returns information about the summed energy (ADU) of all pattern consituents and about multiplicity of each pattern.\n",
-    "\n",
-    "PatternSelector is used to select only patterns of interest based on pattern indices.\n",
-    "\n",
-    "To evaluate CTI (Charge Transfer Inefficiency) and relative gain effects, CTICalculator is used. Calculator needs  axis along which it should be calculated (axis=1 for columns) and lower and upper limit in ADU to select only events around the expcted peak values."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "from IPython.lib import backgroundjobs as bg\n",
-    "jobs = bg.BackgroundJobManager()\n",
-    "from time import sleep\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "\n",
-    "def processLH(dataLH):\n",
-    "    dataLH = cmCorrectionLH.correct(dataLH) #correct for the row common mode\n",
-    "    histCalCMCor.fill(dataLH)   \n",
-    "    dataLH, patternsLH = patternClassifierLH.classify(dataLH)\n",
-    "    histCalPatternCorLH.fill(dataLH)\n",
-    "    singlesLH, d0LH, d1LH, d2LH, d3LH, t0LH, t1LH, t2LH, t3LH, q0LH, q1LH, q2LH, q3LH = patternSelectorLH.select(dataLH, patternsLH)\n",
-    "    histCalSinglesLH.fill(singlesLH)\n",
-    "    histCalDoublesLH.fill(d0LH+d1LH+d2LH+d3LH)\n",
-    "    histCalTriplesLH.fill(t0LH+t1LH+t2LH+t3LH)\n",
-    "    histCalQuadsLH.fill(q0LH+q1LH+q2LH+q3LH)\n",
-    "    ctiCalLH.fill(singlesLH)\n",
-    "    \n",
-    "def processUH(dataUH):\n",
-    "    dataUH = cmCorrectionUH.correct(dataUH) #correct for the row common mode\n",
-    "    histCalCMCor.fill(dataUH)   \n",
-    "    dataUH, patternsUH = patternClassifierUH.classify(dataUH)\n",
-    "    histCalPatternCorUH.fill(dataUH)\n",
-    "    singlesUH, d0UH, d1UH, d2UH, d3UH, t0UH, t1UH, t2UH, t3UH, q0UH, q1UH, q2UH, q3UH = patternSelectorUH.select(dataUH, patternsUH)\n",
-    "    histCalSinglesUH.fill(singlesUH)\n",
-    "    histCalDoublesUH.fill(d0UH+d1UH+d2UH+d3UH)\n",
-    "    histCalTriplesUH.fill(t0UH+t1UH+t2UH+t3UH)\n",
-    "    histCalQuadsUH.fill(q0UH+q1UH+q2UH+q3UH)\n",
-    "    ctiCalUH.fill(singlesUH)\n",
-    "    \n",
-    "\n",
-    "for i,data in enumerate(reader.readChunks()):\n",
-    "        \n",
-    "    data = offsetCorrection.correct(data.astype(np.float32)) #correct for the offset        \n",
-    "    histCalOffsetCor.fill(data)\n",
-    "        \n",
-    "    \n",
-    "        \n",
-    "    dataLH = data[:x//2, :, :]\n",
-    "    dataUH = data[x//2:, :, :]\n",
-    "\n",
-    "    jobs.new(processLH, dataLH)\n",
-    "    jobs.new(processUH, dataUH)\n",
-    "    \n",
-    "\n",
-    "    print(\"Processed {} images.\".format((i+1)*chunkSize))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "while len(jobs.running):\n",
-    "    sleep(5)\n",
-    "jobs.status()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "print(\"******************LOWER HEMISPHERE******************\\n\")\n",
-    "\n",
-    "patternStatsLH = patternClassifierLH.getPatternStats()\n",
-    "fig = plt.figure(figsize=(15,15))\n",
-    "ax = fig.add_subplot(4,4,1)\n",
-    "sfields = [\"singles\", \"first singles\", \"clusters\"]\n",
-    "mfields = [\"doubles\", \"triples\", \"quads\"]\n",
-    "relativeOccurances =  []\n",
-    "labels = []\n",
-    "for i, f in enumerate(sfields):\n",
-    "    relativeOccurances.append(patternStatsLH[f])\n",
-    "    labels.append(f)\n",
-    "for i, f in enumerate(mfields):\n",
-    "    for k in range(len(patternStatsLH[f])):\n",
-    "        relativeOccurances.append(patternStatsLH[f][k])\n",
-    "        labels.append(f+\"(\"+str(k)+\")\")\n",
-    "relativeOccurances = np.array(relativeOccurances, np.float)\n",
-    "relativeOccurances/=np.sum(relativeOccurances)\n",
-    "pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)\n",
-    "ax.set_title(\"Pattern occurrence\")\n",
-    "# Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "a = ax.axis('equal')\n",
-    "\n",
-    "smaps = [\"singlemap\", \"firstsinglemap\", \"clustermap\"]\n",
-    "for i, m in enumerate(smaps):\n",
-    "\n",
-    "    ax = fig.add_subplot(4,4,2+i)\n",
-    "\n",
-    "    pmap = ax.imshow(patternStatsLH[m], interpolation=\"nearest\", vmax=2*np.nanmedian(patternStatsLH[m]))\n",
-    "    ax.set_title(m)\n",
-    "    cb = fig.colorbar(pmap)\n",
-    "\n",
-    "mmaps = [\"doublemap\", \"triplemap\", \"quadmap\"]\n",
-    "k = 0\n",
-    "for i, m in enumerate(mmaps):\n",
-    "\n",
-    "    for j in range(4):\n",
-    "        ax = fig.add_subplot(4,4,2+len(smaps)+k)\n",
-    "        pmap = ax.imshow(patternStatsLH[m][j], interpolation=\"nearest\", vmax=2*np.median(patternStatsLH[m][j]))\n",
-    "        ax.set_title(m+\"(\"+str(j)+\")\")\n",
-    "        cb = fig.colorbar(pmap)\n",
-    "        k+=1"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "patternStatsUH = patternClassifierUH.getPatternStats()\n",
-    "fig = plt.figure(figsize=(15,15))\n",
-    "ax = fig.add_subplot(4,4,1)\n",
-    "sfields = [\"singles\", \"first singles\", \"clusters\"]\n",
-    "mfields = [\"doubles\", \"triples\", \"quads\"]\n",
-    "relativeOccurances =  []\n",
-    "labels = []\n",
-    "for i, f in enumerate(sfields):\n",
-    "    relativeOccurances.append(patternStatsUH[f])\n",
-    "    labels.append(f)\n",
-    "for i, f in enumerate(mfields):\n",
-    "    for k in range(len(patternStatsUH[f])):\n",
-    "        relativeOccurances.append(patternStatsUH[f][k])\n",
-    "        labels.append(f+\"(\"+str(k)+\")\")\n",
-    "relativeOccurances = np.array(relativeOccurances, np.float)\n",
-    "relativeOccurances/=np.sum(relativeOccurances)\n",
-    "pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)\n",
-    "ax.set_title(\"Pattern occurrence\")\n",
-    "# Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "a = ax.axis('equal')\n",
-    "\n",
-    "smaps = [\"singlemap\", \"firstsinglemap\", \"clustermap\"]\n",
-    "for i, m in enumerate(smaps):\n",
-    "\n",
-    "    ax = fig.add_subplot(4,4,2+i)\n",
-    "\n",
-    "    pmap = ax.imshow(patternStatsUH[m], interpolation=\"nearest\", vmax=2*np.nanmedian(patternStatsUH[m]))\n",
-    "    ax.set_title(m)\n",
-    "    cb = fig.colorbar(pmap)\n",
-    "\n",
-    "mmaps = [\"doublemap\", \"triplemap\", \"quadmap\"]\n",
-    "k = 0\n",
-    "for i, m in enumerate(mmaps):\n",
-    "\n",
-    "    for j in range(4):\n",
-    "        ax = fig.add_subplot(4,4,2+len(smaps)+k)\n",
-    "        pmap = ax.imshow(patternStatsUH[m][j], interpolation=\"nearest\", vmax=np.median(patternStatsUH[m][j]))\n",
-    "        ax.set_title(m+\"(\"+str(j)+\")\")\n",
-    "        cb = fig.colorbar(pmap)\n",
-    "        k+=1\n",
-    "        \n",
-    "print(\"******************UPPER HEMISPHERE******************\\n\")        "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "t0 = PrettyTable()\n",
-    "t0.title = \"Total number of Counts\"\n",
-    "t0.field_names = [\"Hemisphere\",\"Singles\", \"First-singles\", \"Clusters\"]\n",
-    "t0.add_row([\"LH\", patternStatsLH['singles'], patternStatsLH['first singles'], patternStatsLH['clusters']])\n",
-    "t0.add_row([\"UH\", patternStatsUH['singles'], patternStatsUH['first singles'], patternStatsUH['clusters']])\n",
-    "\n",
-    "print(t0)\n",
-    "\n",
-    "path= output_folder+'results.txt'\n",
-    "if local_output:\n",
-    "    with open(path,'at') as f:\n",
-    "        f.write(str(t0))\n",
-    "\n",
-    "t1 = PrettyTable()\n",
-    "\n",
-    "t1.field_names = [\"Index\",\"D-LH\", \"D-UH\", \"T-LH\", \"T-UH\", \"Q-LH\", \"Q-UH\"]\n",
-    "\n",
-    "t1.add_row([0, patternStatsLH['doubles'][0], patternStatsUH['doubles'][0], patternStatsLH['triples'][0], patternStatsUH['triples'][0], patternStatsLH['quads'][0], patternStatsUH['quads'][0]])\n",
-    "t1.add_row([1, patternStatsLH['doubles'][1], patternStatsUH['doubles'][1], patternStatsLH['triples'][1], patternStatsUH['triples'][1], patternStatsLH['quads'][1], patternStatsUH['quads'][1]])\n",
-    "t1.add_row([2, patternStatsLH['doubles'][2], patternStatsUH['doubles'][2], patternStatsLH['triples'][2], patternStatsUH['triples'][2], patternStatsLH['quads'][2], patternStatsUH['quads'][2]])\n",
-    "t1.add_row([3, patternStatsLH['doubles'][3], patternStatsUH['doubles'][3], patternStatsLH['triples'][3], patternStatsUH['triples'][3], patternStatsLH['quads'][3], patternStatsUH['quads'][3]])\n",
-    "\n",
-    "print(t1)\n",
-    "\n",
-    "path= output_folder+'results.txt'\n",
-    "if local_output:\n",
-    "    with open(path,'at') as f:\n",
-    "        f.write(str(t1))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "doublesLH = patternStatsLH['doubles'][0] + patternStatsLH['doubles'][1] + patternStatsLH['doubles'][2] + patternStatsLH['doubles'][3]\n",
-    "triplesLH = patternStatsLH['triples'][0] + patternStatsLH['triples'][1] + patternStatsLH['triples'][2] + patternStatsLH['triples'][3]\n",
-    "quadsLH = patternStatsLH['quads'][0] + patternStatsLH['quads'][1] + patternStatsLH['quads'][2] + patternStatsLH['quads'][3]\n",
-    "allsinglesLH = patternStatsLH['singles'] + patternStatsLH['first singles']\n",
-    "eventsLH = allsinglesLH + doublesLH + triplesLH + quadsLH\n",
-    "\n",
-    "doublesUH = patternStatsUH['doubles'][0] + patternStatsUH['doubles'][1] + patternStatsUH['doubles'][2] + patternStatsUH['doubles'][3]\n",
-    "triplesUH = patternStatsUH['triples'][0] + patternStatsUH['triples'][1] + patternStatsUH['triples'][2] + patternStatsUH['triples'][3]\n",
-    "quadsUH = patternStatsUH['quads'][0] + patternStatsUH['quads'][1] + patternStatsUH['quads'][2] + patternStatsUH['quads'][3]\n",
-    "allsinglesUH = patternStatsUH['singles'] + patternStatsUH['first singles']\n",
-    "eventsUH = allsinglesUH + doublesUH + triplesUH + quadsUH\n",
-    "\n",
-    "reloccurLH = np.array([allsinglesLH/eventsLH, doublesLH/eventsLH, triplesLH/eventsLH, quadsLH/eventsLH])\n",
-    "reloccurUH = np.array([allsinglesUH/eventsUH, doublesUH/eventsUH, triplesUH/eventsUH, quadsUH/eventsUH])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "t0 = PrettyTable()\n",
-    "t0.title = \"Relative Occurrence of Patterns\"\n",
-    "t0.field_names = [\"Hemisphere\",\"Singles\", \"Doubles\", \"Triples\", \"Quads\"]\n",
-    "t0.add_row([\"LH\", '{:0.1f}%'.format(reloccurLH[0]*100), '{:0.1f}%'.format(reloccurLH[1]*100), '{:0.1f}%'.format(reloccurLH[2]*100), '{:0.1f}%'.format(reloccurLH[3]*100)])\n",
-    "t0.add_row([\"UH\", '{:0.1f}%'.format(reloccurUH[0]*100), '{:0.1f}%'.format(reloccurUH[1]*100), '{:0.1f}%'.format(reloccurUH[2]*100), '{:0.1f}%'.format(reloccurUH[3]*100)])\n",
-    "\n",
-    "print(t0)\n",
-    "\n",
-    "if local_output:\n",
-    "    with open(path, 'at') as f:\n",
-    "        f.write(str(t0))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "fig = plt.figure(figsize=(10,5))\n",
-    "ax = fig.add_subplot(1,2,1)\n",
-    "labels = ['singles', 'doubles', 'triples', 'quads']\n",
-    "pie = ax.pie(reloccurLH, labels=labels, autopct='%1.1f%%', shadow=True)\n",
-    "ax.set_title(\"Pattern occurrence LH\")\n",
-    "# Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "a = ax.axis('equal')\n",
-    "ax = fig.add_subplot(1,2,2)\n",
-    "pie = ax.pie(reloccurUH, labels=labels, autopct='%1.1f%%', shadow=True)\n",
-    "ax.set_title(\"Pattern occurrence UH\")\n",
-    "# Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "a = ax.axis('equal')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "h1,e1,c1,s1 = histCalSinglesLH.get()\n",
-    "h,e,c,s = histCalPatternCorLH.get()\n",
-    "h2,e2,c2,s2 = histCalDoublesLH.get ()\n",
-    "h3,e3,c3,s3 = histCalTriplesLH.get ()\n",
-    "h4,e4,c4,s4 = histCalQuadsLH.get ()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'all events'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'singles'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'doubles'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'triples'},\n",
-    "     {'x': c4,\n",
-    "      'y': h4,\n",
-    "      'y_err': np.sqrt(h4[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'quads'},\n",
-    "     \n",
-    "    ]\n",
-    "\n",
-    "if local_output:\n",
-    "    h=np.ma.getdata(h)\n",
-    "    np.savez(output_folder+'LH_all_events.npz', c,h)\n",
-    "\n",
-    "    h1=np.ma.getdata(h1)\n",
-    "    np.savez(output_folder+'LH_singles.npz', c1,h1)\n",
-    "\n",
-    "    h2=np.ma.getdata(h2)\n",
-    "    np.savez(output_folder+'LH_doubles.npz', c2,h2) \n",
-    "\n",
-    "    h3=np.ma.getdata(h3)\n",
-    "    np.savez(output_folder+'LH_triples.npz', c3,h3)\n",
-    "\n",
-    "    h4=np.ma.getdata(h4)\n",
-    "    np.savez(output_folder+'LH_quads.npz', c4,h4) \n",
-    "\n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU) - LH', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')\n",
-    "\n",
-    "h1,e1,c1,s1 = histCalSinglesUH.get()\n",
-    "h,e,c,s = histCalPatternCorUH.get()\n",
-    "h2,e2,c2,s2 = histCalDoublesUH.get()\n",
-    "h3,e3,c3,s3 = histCalTriplesLH.get ()\n",
-    "h4,e4,c4,s4 = histCalQuadsLH.get ()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'all events'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'singles'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'doubles'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'triples'},\n",
-    "     {'x': c4,\n",
-    "      'y': h4,\n",
-    "      'y_err': np.sqrt(h4[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'quads'},\n",
-    "    ]\n",
-    "     \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU) - UH', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')\n",
-    "\n",
-    "if local_output:\n",
-    "    h=np.ma.getdata(h)\n",
-    "    np.savez(output_folder+'UH_all_events.npz', c,h)\n",
-    "\n",
-    "    h1=np.ma.getdata(h1)\n",
-    "    np.savez(output_folder+'UH_singles.npz', c1,h1)\n",
-    "\n",
-    "    h2=np.ma.getdata(h2)\n",
-    "    np.savez(output_folder+'UH_doubles.npz', c2,h2) \n",
-    "\n",
-    "    h3=np.ma.getdata(h3)\n",
-    "    np.savez(output_folder+'UH_triples.npz', c3,h3)\n",
-    "\n",
-    "    h4=np.ma.getdata(h4)\n",
-    "    np.savez(output_folder+'UH_quads.npz', c4,h4) \n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### CTI and Relative Gain Corrections "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In the second iteration data are corrected for CTI and relative gain effects.\n",
-    "\n",
-    "When CTI is evaluated selected single events are plotted against their position in a row, for each column separately. Plotted points are fitted by linear function,$$y=m\\times x+b,$$\n",
-    "    where y-intercept is the expected value of the event and CTI is given as $$CTI=\\frac{m}{b}.$$\n",
-    "Relative gain is a relative difference in the amplification of charge due to different output amplifiers. It is calculated from the $b$ value of the linear fit as $$relative\\:gain=\\frac{b(i)}{\\frac{1}{N}\\sum_{0}^{N}b(i)}.$$"
-   ]
-  },
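-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "*Minimal sketch of the fit described above (not the CTICalculator implementation):* per-column linear fits on synthetic single-event data illustrate how CTI = m/b and the relative gain b(i) divided by the mean of b are obtained. The column/row counts and the injected gain and CTI values are invented for demonstration only."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "\n",
-    "np.random.seed(1)\n",
-    "n_rows, n_cols = 967, 480 # assumed hemisphere dimensions, illustration only\n",
-    "rows = np.arange(n_rows, dtype=np.float64)\n",
-    "\n",
-    "b_true = 290. * (1. + 0.01 * np.random.randn(n_cols)) # per-column intercept (peak position, ADU)\n",
-    "cti_true = 1e-5 * np.random.rand(n_cols)              # per-column charge transfer inefficiency\n",
-    "\n",
-    "m_fit = np.empty(n_cols)\n",
-    "b_fit = np.empty(n_cols)\n",
-    "for col in range(n_cols):\n",
-    "    # Single-event energy decreases with the number of shifts to the readout node\n",
-    "    energy = b_true[col] * (1. - cti_true[col] * rows) + np.random.normal(0., 2., n_rows)\n",
-    "    m_fit[col], b_fit[col] = np.polyfit(rows, energy, 1) # slope m and intercept b\n",
-    "\n",
-    "cti = np.abs(m_fit / b_fit)       # CTI = m/b (magnitude: the signal decreases towards larger row numbers)\n",
-    "rel_gain = b_fit / np.mean(b_fit) # relative gain = b(i) / mean(b)\n",
-    "print('mean CTI: {:0.2e}, relative gain spread: {:0.2e}'.format(cti.mean(), rel_gain.std()))"
-   ]
-  },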
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "ctiLH, ctierrLH, relGainLH, relGainErrLH, chi2LH, pvalLH, fig = ctiCalLH.get(\n",
-    "                                              plot=False, axis=cti_Axis_LH)\n",
-    "\n",
-    "ctiUH, ctierrUH, relGainUH, relGainErrUH, chi2UH, pvalUH, fig = ctiCalUH.get(\n",
-    "                                              plot=False, axis=cti_Axis_UH)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "#Number of columns for which CTI fit did not converge:\n",
-    "ncc=np.count_nonzero(np.isnan(ctiLH))\n",
-    "print('Number of non-converging columns LH: {:0.0f}'.format(ncc))\n",
-    "\n",
-    "ncc=np.count_nonzero(np.isnan(ctiUH))\n",
-    "print('number of non-converging columns UH: {:0.0f}'.format(ncc))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "if local_output:\n",
-    "    np.savez(output_folder+'ctiLH.npz', ctiLH,ctierrLH)\n",
-    "    np.savez(output_folder+'ctiUH.npz', ctiUH,ctierrUH)\n",
-    "\n",
-    "    np.savez(output_folder+'relGainLH.npz', relGainLH,relGainErrLH)\n",
-    "    np.savez(output_folder+'relGainUH.npz', relGainUH,relGainErrUH)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ctiCorrectionLH = xcal.CTICorrection([x//2, y], \n",
-    "                                   ctiLH, axis = cti_Axis_LH, cores=cpuCores,\n",
-    "                                   nCells = memoryCells, gains=None,\n",
-    "                                  blockSize=blockSize, runParallel=run_parallel) \n",
-    "                                   \n",
-    "relGainCorrLH = xcal.RelativeGainCorrection([x//2, y], \n",
-    "                                          relGainLH, axis = cti_Axis_LH, \n",
-    "                                          cores=cpuCores,\n",
-    "                                          nCells = memoryCells, gains=None,\n",
-    "                                         blockSize=blockSize, runParallel=run_parallel)\n",
-    "\n",
-    "ctiCorrectionUH = xcal.CTICorrection([x//2, y], \n",
-    "                                   ctiUH, axis = cti_Axis_UH, cores=cpuCores,\n",
-    "                                   nCells = memoryCells, gains=None,\n",
-    "                                  blockSize=blockSize, runParallel=run_parallel) \n",
-    "                                   \n",
-    "relGainCorrUH = xcal.RelativeGainCorrection([x//2, y], \n",
-    "                                          relGainUH, axis = cti_Axis_UH, \n",
-    "                                          cores=cpuCores,\n",
-    "                                          nCells = memoryCells, gains=None,\n",
-    "                                         blockSize=blockSize, runParallel=run_parallel)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "plt.rcParams.update({'font.size': 18})"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Relative Gain"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "mask = relGainLH == 0\n",
-    "relGainMskdLH = np.ma.MaskedArray(relGainLH, mask)\n",
-    "\n",
-    "columns = np.arange(0,y)\n",
-    "\n",
-    "d1 = {'x': columns,\n",
-    "      'y': relGainMskdLH,\n",
-    "      'drawstyle': 'marker-only',\n",
-    "      'marker': '.'\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(d1, figsize='1col', \n",
-    "                    aspect=2, x_label = 'Columns', \n",
-    "                    y_label=\"Relative gain\",# - LH\", \n",
-    "                    y_log=False, x_range=(0,y),)\n",
-    "\n",
-    "#fig.savefig(output_folder+'RG_LH.png', dpi=300, bbox_inches='tight')\n",
-    "\n",
-    "mask = relGainUH == 0\n",
-    "relGainMskdUH = np.ma.MaskedArray(relGainUH, mask)\n",
-    "\n",
-    "d1 = {'x': columns,\n",
-    "      'y': relGainMskdUH,\n",
-    "      'drawstyle': 'marker-only',\n",
-    "      'marker': '.'\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(d1, figsize='1col', \n",
-    "                    aspect=2, x_label = 'Columns', \n",
-    "                    y_label=\"Relative gain\",# - UH\", \n",
-    "                    y_log=False, x_range=(0,y),)\n",
-    "\n",
-    "#fig.savefig(output_folder+'RG_UH.png', dpi=300, bbox_inches='tight')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### CTI"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib as mpl\n",
-    "mpl.matplotlib_fname()\n",
-    "mpl.rc('text', usetex=True)\n",
-    "\n",
-    "d = {'x': columns,\n",
-    "     'y': ctiLH/10e-6,\n",
-    "     #'y_err': ctierr,\n",
-    "     'drawstyle': 'marker-only',\n",
-    "     'marker': '.',\n",
-    "     'color': 'crimson'\n",
-    "     }\n",
-    "\n",
-    "\n",
-    "fig = xana.simplePlot(d, figsize='1col', \n",
-    "                    aspect=2, x_label = 'Columns', \n",
-    "                   y_label=r'$\\displaystyle\\mathrm{\\frac{CTI}{10^{-6}}}$',# - LH', \n",
-    "                      y_log=True, y_range=(0,np.nanmax(ctiUH)/10e-6))\n",
-    "\n",
-    "#fig.savefig(output_folder+'CTI_LH.png', dpi=300, bbox_inches='tight')\n",
-    "\n",
-    "\n",
-    "d = {'x': columns,\n",
-    "     'y': ctiUH/10e-6,\n",
-    "     #'y_err': ctierr,\n",
-    "     'drawstyle': 'marker-only',\n",
-    "      'marker': '.',\n",
-    "     'color': 'crimson'\n",
-    "     }\n",
-    "\n",
-    "\n",
-    "fig = xana.simplePlot(d, figsize='1col', \n",
-    "                    aspect=2, x_label = 'Columns', \n",
-    "                    y_label=r'$\\displaystyle\\mathrm{\\frac{CTI}{10^{-6}}}$',# - UH', \n",
-    "                      y_log=True, y_range=(0,np.nanmax(ctiUH)/10e-6))\n",
-    "\n",
-    "#fig.savefig(output_folder+'CTI_UH.png', dpi=300, bbox_inches='tight')\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "mpl.rc('text', usetex=False)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Relative Gain Map with CTI"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "For the CTI correction, map has to be created, as one has to take into account number of charge shifts to the readout node. CTI map is calculated as $$CTI\\:map_{(x,y)}=(1+CTI_{(y)})^x,$$ where $CTI_{(y)}$ is the CTI value gained from calculator and $x$ refers to the rows of the FastCCD. It repesents the number of charge shifts needed to reach a readout node."
-   ]
-  },
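-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "*Sketch of the map construction above, using plain numpy broadcasting on invented per-column CTI values.* The real maps are taken from the correction objects (`ctiCorrectionLH/UH.correctionMap`); this cell only illustrates the formula."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "\n",
-    "np.random.seed(2)\n",
-    "n_rows, n_cols = 967, 960 # assumed hemisphere size, illustration only\n",
-    "cti_per_column = 1e-5 * np.random.rand(n_cols)\n",
-    "\n",
-    "shifts = np.arange(n_rows)[:, None]                # x: number of shifts to the readout node\n",
-    "cti_map = (1. + cti_per_column[None, :]) ** shifts # CTI map_(x,y) = (1 + CTI_(y))^x\n",
-    "\n",
-    "print(cti_map.shape, float(cti_map.min()), float(cti_map.max()))"
-   ]
-  },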
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "plt.rcParams.update({'font.size': 12})"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ctiMapLH = ctiCorrectionLH.correctionMap\n",
-    "relgainMapLH = relGainCorrLH.correctionMap\n",
-    "\n",
-    "ctiMapUH = ctiCorrectionUH.correctionMap\n",
-    "relgainMapUH = relGainCorrUH.correctionMap\n",
-    "\n",
-    "ctiMap = np.zeros((x,y,1))\n",
-    "ctiMap[:x//2,:] = ctiMapLH\n",
-    "ctiMap[x//2:,:] = ctiMapUH\n",
-    "\n",
-    "relgainMap = np.zeros((x,y,1))\n",
-    "relgainMap[:x//2,:] = relgainMapLH\n",
-    "relgainMap[x//2:,:] = relgainMapUH\n",
-    "\n",
-    "\n",
-    "mapsTest = np.zeros_like(relgainMap)\n",
-    "mapsTest[:x//2,:] = relgainMapLH*ctiMapLH\n",
-    "mapsTest[x//2:,:] = relgainMapUH*ctiMapUH\n",
-    "\n",
-    "maskCTI = ctiMap == 0 \n",
-    "maskRG = relgainMap == 0\n",
-    "ctiMap = np.ma.MaskedArray(ctiMap, maskCTI)\n",
-    "relgainMap = np.ma.MaskedArray(relgainMap, maskRG)\n",
-    "\n",
-    "fig = xana.heatmapPlot(mapsTest[:,:,0], vmin=0.95, vmax=1.1, x_range=(0,y), y_range=(0,x), \n",
-    "                       lut_label=\"Relative gain with CTI\", x_label=\"Columns\", y_label=\"Rows\", figsize='2col', aspect=1)\n",
-    "#fig.savefig(output_folder+'RGmapwithCTI.png', dpi=300, bbox_inches='tight')\n",
-    "\n",
-    "fig = xana.heatmapPlot(relgainMap[:,:,0], vmin=0.95, vmax=1.1, x_range=(0,y), y_range=(0,x), \n",
-    "                       lut_label=\"Relative gain\", x_label=\"Columns\", y_label=\"Rows\", figsize='2col', aspect=1)\n",
-    "#fig.savefig(output_folder+'RGmap.png', dpi=300, bbox_inches='tight')\n",
-    "\n",
-    "fig = xana.heatmapPlot(ctiMap[:,:,0], vmin=0.95, vmax=1.1, x_range=(0,y), y_range=(0,x), \n",
-    "                       lut_label=\"CTI\", x_label=\"Columns\", y_label=\"Rows\", figsize='2col', aspect=1)\n",
-    "#fig.savefig(output_folder+'CTImap.png', dpi=300, bbox_inches='tight')\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "valuesLH = [np.ma.std(relGainMskdLH), np.nanstd(ctiLH), np.nanmean(ctiLH), np.nanmax(ctiLH)]\n",
-    "valuesUH = [np.ma.std(relGainMskdUH), np.nanstd(ctiUH), np.nanmean(ctiUH), np.nanmax(ctiUH)]\n",
-    "\n",
-    "t0 = PrettyTable()\n",
-    "t0.field_names = [\"Hemisphere\",\"Rel. gain variation\", \"CTI variation\", \"Avg. CTI\", 'Max. CTI']\n",
-    "t0.add_row([\"LH\", '{:0.2E}'.format(valuesLH[0]), '{:0.2E}'.format(valuesLH[1]), '{:0.2E}'.format(valuesLH[2]), '{:0.2E}'.format(valuesLH[3])])\n",
-    "t0.add_row([\"UH\", '{:0.2E}'.format(valuesUH[0]), '{:0.2E}'.format(valuesUH[1]), '{:0.2E}'.format(valuesUH[2]), '{:0.2E}'.format(valuesUH[3])])\n",
-    "\n",
-    "print(t0)\n",
-    "\n",
-    "path= output_folder+'results.txt'\n",
-    "if local_output:\n",
-    "    with open(path,'at') as f:\n",
-    "        f.write(str(t0))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "patternClassifierLH.reset()\n",
-    "patternClassifierUH.reset()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "patternClassifierLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternClassifierUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "patternSelectorLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternSelectorUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "ctiCorrectionLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "ctiCorrectionUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "relGainCorrLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "relGainCorrUH.setBadPixelMask(badPixelMap[x//2:, :])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "histCalCTICorLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                                bins=1000, \n",
-    "                                                range=[0, 1000],\n",
-    "                                                nCells=memoryCells, \n",
-    "                                                cores=cpuCores,\n",
-    "                                            blockSize=blockSize)\n",
-    "\n",
-    "histCalCTICorUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                                bins=1000, \n",
-    "                                                range=[0, 1000],\n",
-    "                                                nCells=memoryCells, \n",
-    "                                                cores=cpuCores,\n",
-    "                                            blockSize=blockSize)\n",
-    "\n",
-    "histCalSinglesLH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalSinglesUH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalDoublesLH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalDoublesUH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalTriplesLH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "histCalTriplesUH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "\n",
-    "histCalQuadsLH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "histCalQuadsUH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "def processLH(dataLH):\n",
-    "    dataLH = cmCorrectionLH.correct(dataLH) #correct for the row common mode\n",
-    "    dataLH = ctiCorrectionLH.correct(dataLH)\n",
-    "    dataLH = relGainCorrLH.correct(dataLH)\n",
-    "    dataLH, patternsLH = patternClassifierLH.classify(dataLH)\n",
-    "    histCalCTICorLH.fill(dataLH)\n",
-    "    singlesLH, d0LH, d1LH, d2LH, d3LH, t0LH, t1LH, t2LH, t3LH, q0LH, q1LH, q2LH, q3LH = patternSelectorLH.select(dataLH, patternsLH)\n",
-    "        \n",
-    "    histCalSinglesLH_cticor.fill(singlesLH)        \n",
-    "    histCalDoublesLH_cticor.fill(d0LH+d1LH+d2LH+d3LH)  \n",
-    "    histCalTriplesLH_cticor.fill(t0LH+t1LH+t2LH+t3LH)    \n",
-    "    histCalQuadsLH_cticor.fill(q0LH+q1LH+q2LH+q3LH)\n",
-    "    \n",
-    "def processUH(dataUH):\n",
-    "    dataUH = cmCorrectionUH.correct(dataUH) #correct for the row common mode      \n",
-    "    dataUH = ctiCorrectionUH.correct(dataUH)\n",
-    "    dataUH = relGainCorrUH.correct(dataUH)\n",
-    "    dataUH, patternsUH = patternClassifierUH.classify(dataUH)\n",
-    "    histCalCTICorUH.fill(dataUH)\n",
-    "    singlesUH, d0UH, d1UH, d2UH, d3UH, t0UH, t1UH, t2UH, t3UH, q0UH, q1UH, q2UH, q3UH = patternSelectorUH.select(dataUH, patternsUH)\n",
-    "        \n",
-    "    histCalSinglesUH_cticor.fill(singlesUH)    \n",
-    "    histCalDoublesUH_cticor.fill(d0UH+d1UH+d2UH+d3UH)\n",
-    "    histCalTriplesUH_cticor.fill(t0UH+t1UH+t2UH+t3UH)    \n",
-    "    histCalQuadsUH_cticor.fill(q0UH+q1UH+q2UH+q3UH)\n",
-    "    \n",
-    "\n",
-    "for i,data in enumerate(reader.readChunks()):\n",
-    "        \n",
-    "    data = offsetCorrection.correct(data.astype(np.float32)) #correct for the offset                \n",
-    "    \n",
-    "    \n",
-    "    dataLH = data[:x//2, :, :]\n",
-    "    dataUH = data[x//2:, :, :]\n",
-    "    \n",
-    "    jobs.new(processLH, dataLH)\n",
-    "    jobs.new(processUH, dataUH)\n",
-    "    print(\"Processed {} images.\".format((i+1)*chunkSize))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "while len(jobs.running):\n",
-    "    sleep(5)\n",
-    "jobs.status()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Plotting the Results"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "h,e,c,s = histCalCMCor.get()\n",
-    "h0L,e0L,c0L,s0L = histCalPatternCorLH.get()\n",
-    "h0U,e0U,c0U,s0U = histCalPatternCorUH.get()\n",
-    "h0 = h0L + h0U\n",
-    "h1L,e1L,c1L,s1L = histCalCTICorLH.get()\n",
-    "h1U,e1U,c1U,s1U = histCalCTICorUH.get()\n",
-    "h1 = h1L + h1U\n",
-    "hsL,esL,csL,ssL = histCalSinglesLH_cticor.get ()\n",
-    "hsU,esU,csU,ssU = histCalSinglesUH_cticor.get ()\n",
-    "hs = hsL + hsU\n",
-    "\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'CM corr.'},\n",
-    "     {'x': c,\n",
-    "      'y': h0,\n",
-    "      'y_err': np.sqrt(h0[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'green',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Charge sharing corr.'},\n",
-    "     {'x': c,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'red',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Rel. gain/CTI corr.'},\n",
-    "     {'x': c,\n",
-    "      'y': hs,\n",
-    "      'y_err': np.sqrt(hs[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'red',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Single events'},\n",
-    "     ]\n",
-    "     \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy(ADU)', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-center-frame-1col')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false,
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "h1,e1,c1,s1 = histCalSinglesLH_cticor.get()\n",
-    "h,e,c,s = histCalCTICorLH.get()\n",
-    "h2,e2,c2,s2 = histCalDoublesLH_cticor.get ()\n",
-    "h3,e3,c3,s3 = histCalTriplesLH_cticor.get ()\n",
-    "h4,e4,c4,s4 = histCalQuadsLH_cticor.get ()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'all events'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'singles'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'doubles'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'triples'},\n",
-    "     {'x': c4,\n",
-    "      'y': h4,\n",
-    "      'y_err': np.sqrt(h4[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'quads'},\n",
-    "     \n",
-    "    ]\n",
-    "if local_output:\n",
-    "    h=np.ma.getdata(h)\n",
-    "    np.savez(output_folder+'LH_all_events_cticor.npz', c,h)\n",
-    "\n",
-    "    h1=np.ma.getdata(h1)\n",
-    "    np.savez(output_folder+'LH_singles_cticor.npz', c1,h1)\n",
-    "\n",
-    "    h2=np.ma.getdata(h2)\n",
-    "    np.savez(output_folder+'LH_doubles_cticor.npz', c2,h2) \n",
-    "\n",
-    "    h3=np.ma.getdata(h3)\n",
-    "    np.savez(output_folder+'LH_triples_cticor.npz', c3,h3)\n",
-    "\n",
-    "    h4=np.ma.getdata(h4)\n",
-    "    np.savez(output_folder+'LH_quads_cticor.npz', c4,h4) \n",
-    "\n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU) - LH', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')\n",
-    "\n",
-    "h1,e1,c1,s1 = histCalSinglesUH_cticor.get()\n",
-    "h,e,c,s = histCalCTICorUH.get()\n",
-    "h2,e2,c2,s2 = histCalDoublesUH_cticor.get()\n",
-    "h3,e3,c3,s3 = histCalTriplesLH_cticor.get ()\n",
-    "h4,e4,c4,s4 = histCalQuadsLH_cticor.get ()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'all events'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'singles'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'doubles'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'triples'},\n",
-    "     {'x': c4,\n",
-    "      'y': h4,\n",
-    "      'y_err': np.sqrt(h4[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'quads'},\n",
-    "    ]\n",
-    "     \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU) - UH', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')\n",
-    "\n",
-    "if local_output:\n",
-    "    h=np.ma.getdata(h)\n",
-    "    np.savez(output_folder+'UH_all_events_cticor.npz', c,h)\n",
-    "\n",
-    "    h1=np.ma.getdata(h1)\n",
-    "    np.savez(output_folder+'UH_singles_cticor.npz', c1,h1)\n",
-    "\n",
-    "    h2=np.ma.getdata(h2)\n",
-    "    np.savez(output_folder+'UH_doubles_cticor.npz', c2,h2) \n",
-    "\n",
-    "    h3=np.ma.getdata(h3)\n",
-    "    np.savez(output_folder+'UH_triples_cticor.npz', c3,h3)\n",
-    "\n",
-    "    h4=np.ma.getdata(h4)\n",
-    "    np.savez(output_folder+'UH_quads_cticor.npz', c4,h4) "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "h,e,c,s = histCalSinglesLH.get()\n",
-    "h1,e1,c1,s1 = histCalSinglesLH_cticor.get()\n",
-    "h2,e2,c2,s2 = histCalSinglesUH.get()\n",
-    "h3,e3,c3,s3 = histCalSinglesUH_cticor.get()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Signels LH'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Singles LH corr.'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Singles UH'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Singles UH corr.'},\n",
-    "     ]\n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU)', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "patternStatsLH = patternClassifierLH.getPatternStats()\n",
-    "patternStatsUH = patternClassifierUH.getPatternStats()\n",
-    "\n",
-    "\n",
-    "t0 = PrettyTable()\n",
-    "t0.title = \"Total number of counts after all corrections\"\n",
-    "t0.field_names = [\"Hemisphere\",\"Singles\", \"First-singles\", \"Clusters\"]\n",
-    "t0.add_row([\"LH\", patternStatsLH['singles'], patternStatsLH['first singles'], patternStatsLH['clusters']])\n",
-    "t0.add_row([\"UH\", patternStatsUH['singles'], patternStatsUH['first singles'], patternStatsUH['clusters']])\n",
-    "\n",
-    "print(t0)\n",
-    "\n",
-    "path= output_folder+'results.txt'\n",
-    "if local_output:\n",
-    "    with open(path,'at') as f:\n",
-    "        f.write(str(t0))\n",
-    "\n",
-    "t1 = PrettyTable()\n",
-    "\n",
-    "t1.field_names = [\"Index\",\"D-LH\", \"D-UH\", \"T-LH\", \"T-UH\", \"Q-LH\", \"Q-UH\"]\n",
-    "\n",
-    "t1.add_row([0, patternStatsLH['doubles'][0], patternStatsUH['doubles'][0], patternStatsLH['triples'][0], patternStatsUH['triples'][0], patternStatsLH['quads'][0], patternStatsUH['quads'][0]])\n",
-    "t1.add_row([1, patternStatsLH['doubles'][1], patternStatsUH['doubles'][1], patternStatsLH['triples'][1], patternStatsUH['triples'][1], patternStatsLH['quads'][1], patternStatsUH['quads'][1]])\n",
-    "t1.add_row([2, patternStatsLH['doubles'][2], patternStatsUH['doubles'][2], patternStatsLH['triples'][2], patternStatsUH['triples'][2], patternStatsLH['quads'][2], patternStatsUH['quads'][2]])\n",
-    "t1.add_row([3, patternStatsLH['doubles'][3], patternStatsUH['doubles'][3], patternStatsLH['triples'][3], patternStatsUH['triples'][3], patternStatsLH['quads'][3], patternStatsUH['quads'][3]])\n",
-    "\n",
-    "print(t1)\n",
-    "path= output_folder+'results.txt'\n",
-    "if local_output:\n",
-    "    with open(path,'at') as f:\n",
-    "        f.write(str(t1))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "def lin_fit(peak_pos_adu, peak_pos_eV, peak_pos_errors, m, print_level):\n",
-    "    def linear(x, m):\n",
-    "        return m*x\n",
-    "\n",
-    "    def linearWrap(m):\n",
-    "        return np.sum(((linear(peak_pos_adu[1:], m)\n",
-    "            - peak_pos_eV[1:]) / (peak_pos_errors)) ** 2)\n",
-    "\n",
-    "    pparm = dict(throw_nan=False, pedantic=False, print_level=print_level)\n",
-    "    pparm['m'] = m\n",
-    "\n",
-    "\n",
-    "    m = Minuit(linearWrap, **pparm)\n",
-    "    fmin = m.migrad()\n",
-    "    ferr = m.minos(maxcall = 10000)\n",
-    "    res = m.values\n",
-    "\n",
-    "    fitv = res['m']*peak_pos_adu\n",
-    "    #Gain of the detector\n",
-    "    gain = res['m']\n",
-    "    gainErr = np.sqrt(ferr['m']['lower']**2\n",
-    "                  +ferr['m']['upper']**2) #error\n",
-    "\n",
-    "    d=[{'x':peak_pos_adu,\n",
-    "        'y':peak_pos_eV,\n",
-    "        'drawstyle': 'marker-only',\n",
-    "        'marker': 'o'\n",
-    "       },\n",
-    "       {'x':peak_pos_adu,\n",
-    "        'y':fitv,\n",
-    "        'drawstyle': 'line'\n",
-    "       }]\n",
-    "\n",
-    "    fig = xana.simplePlot(d, aspect=2, x_label='Energy (ADU)',\n",
-    "                          y_label='Energy (eV)', figsize='1col',\n",
-    "                          x_range=(0,325), y_range=(0,7e3))\n",
-    "    return gain, gainErr"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "def Gauss_fit(CTIcorrHist, mu1, sigma1, mu2, sigma2, mu3, sigma3, mu1_limL, mu1_limH, mu2_limL, mu2_limH, mu3_limL, mu3_limH):\n",
-    "    h1,e1,c1,s1 = CTIcorrHist.get()\n",
-    "    hist = h1[cti_lim_low:cti_lim_high].astype(np.float64)\n",
-    "    bin_centres = c1[cti_lim_low:cti_lim_high]\n",
-    "\n",
-    "    hnonzeros = hist != 0 \n",
-    "\n",
-    "\n",
-    "    def multiGauss(x, s, *p):\n",
-    "            r = 0.\n",
-    "            for i in range(s):\n",
-    "                A, mu, sigma = p[i*3:(i+1)*3]\n",
-    "                r += A*np.exp(-(x-mu)**2/(2.*sigma**2))\n",
-    "            return r\n",
-    "\n",
-    "    def multiGaussWrap(A1, mu1, sigma1, A2, mu2, sigma2, A3, mu3, sigma3):\n",
-    "        return np.sum(((multiGauss(bin_centres[hnonzeros],3, A1, mu1, sigma1, A2, mu2, sigma2, A3, mu3, sigma3)\n",
-    "                        - hist[hnonzeros]) / np.sqrt(hist[hnonzeros])) ** 2)\n",
-    "\n",
-    "\n",
-    "    pparm = dict(throw_nan=False, pedantic=False, print_level=1)\n",
-    "\n",
-    "    pparm['A1'] = 10000\n",
-    "    pparm['limit_A1'] = (100., 1e6)\n",
-    "    pparm['mu1'] = mu1\n",
-    "    pparm['limit_mu1'] = (mu1_limL, mu1_limH)\n",
-    "    pparm['sigma1'] = sigma1\n",
-    "    pparm['limit_sigma1'] = (1., 25)\n",
-    "\n",
-    "    pparm['A2'] = 1000.\n",
-    "    pparm['limit_A2'] = (30., 5e4)\n",
-    "    pparm['mu2'] = mu2\n",
-    "    pparm['limit_mu2'] = (mu2_limL, mu2_limH)\n",
-    "    pparm['sigma2'] = sigma2\n",
-    "    pparm['limit_sigma2'] = (1., 50)\n",
-    "\n",
-    "    pparm['A3'] = 200.\n",
-    "    pparm['limit_A3'] = (1., 1e5)\n",
-    "    pparm['mu3'] = mu3\n",
-    "    pparm['limit_mu3'] = (mu3_limL, mu3_limH)\n",
-    "    pparm['sigma3'] = sigma3   \n",
-    "    pparm['limit_sigma3'] = (1., 50)\n",
-    "\n",
-    "    m = Minuit(multiGaussWrap, **pparm)\n",
-    "    fmin = m.migrad()\n",
-    "    ferr = m.minos()\n",
-    "    res = m.values\n",
-    "\n",
-    "    #Extracting information about sigma and its error for later use\n",
-    "    sigma1 = res['sigma1']\n",
-    "    Errorsigma = np.array([ferr['sigma1']['lower'], \n",
-    "                           ferr['sigma1']['upper']])\n",
-    "    sigma1err = np.sqrt(Errorsigma[0]**2+Errorsigma[1]**2)\n",
-    "    print('sigma1 = ', sigma1)\n",
-    "    print('sigma1err = ', sigma1err)\n",
-    "\n",
-    "    x_fit = np.arange(cti_lim_low,cti_lim_high)\n",
-    "    p = [res['A1'], res['mu1'], res['sigma1'], res['A2'], res['mu2'], res['sigma2'], res['A3'], res['mu3'], res['sigma3']]\n",
-    "    y_fit = multiGauss(x_fit, 3, *p)\n",
-    "\n",
-    "    d = [{'x': bin_centres,\n",
-    "         'y': hist,\n",
-    "         'drawstyle':'steps-mid',\n",
-    "         },\n",
-    "         {'x': x_fit,\n",
-    "         'y': multiGauss(x_fit, 1, res['A1'], res['mu1'], res['sigma1']),\n",
-    "         'drawstyle':'line',\n",
-    "         },\n",
-    "         {'x': x_fit,\n",
-    "         'y': multiGauss(x_fit, 1, res['A2'], res['mu2'], res['sigma2']),\n",
-    "         'drawstyle':'line',\n",
-    "         },\n",
-    "         {'x': x_fit,\n",
-    "          'y': y_fit,\n",
-    "          'y2': (hist-y_fit)/np.sqrt(hist),\n",
-    "          'color': 'red',\n",
-    "          'drawstyle':'line',\n",
-    "          'drawstyle2': 'steps-mid'\n",
-    "         }]\n",
-    "\n",
-    "\n",
-    "    fig = xana.simplePlot(d, figsize='1col', aspect=1, x_label = 'ADU', y_label=\"counts\", secondpanel=True, y_log=False)\n",
-    "\n",
-    "    peak_pos_adu = np.array([0, res['mu1'], res['mu2']])\n",
-    "    peak_pos_errors = [np.array([ferr['mu1']['lower'], \n",
-    "                           ferr['mu2']['lower']]),\n",
-    "                 np.array([ferr['mu1']['upper'], \n",
-    "                           ferr['mu2']['upper']])]\n",
-    "    \n",
-    "    return sigma1, sigma1err, peak_pos_adu, peak_pos_errors"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Evaluating gain and energy resolution for LH"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "sigma1, sigma1err, peak_pos_adu, peak_pos_errors = Gauss_fit(histCalSinglesLH_cticor, 265., 15., 290., 20., 230., 15., 265., 275., 280., 310., 230., 265.)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "peak_pos_eV = np.array([0, 5898.75, 6490.45])\n",
-    "\n",
-    "gain, gainErr = lin_fit(peak_pos_adu, peak_pos_eV, peak_pos_errors, m=1, print_level=0)\n",
-    "print(\"gain eV/ADU = %.5f +- %.5f\\n\" %(gain, gainErr))\n",
-    "\n",
-    "#Energy resolution @5.89 keV\n",
-    "#K-alpha sigma/slope of linear fit (result: sigma)\n",
-    "eres =  sigma1/(1./gain) \n",
-    "eres_error = (np.sqrt(sigma1err**2 + gainErr**2))*gain\n",
-    "\n",
-    "print(\"Energy resolution: sigma = %.5f +- %.5f eV\" %(eres, eres_error))\n",
-    "print(\"Energy resolution: FWHM = %.5f eV\\n\" %(eres*2.3548))\n",
-    "\n",
-    "#Detector resolution\n",
-    "F = 0.115\n",
-    "E = 5.89e3\n",
-    "\n",
-    "det_res = eres - np.sqrt(F*E*3.66)\n",
-    "print('Detector resolution: sigma = %.5f eV' %det_res)\n",
-    "print('Detector resolution: FWHM = %.5f eV\\n' %(det_res*2.3548))\n",
-    "\n",
-    "output_val = np.array([['Gain LH [eV/ADU] = ', '{:0.2f}'.format(gain)],\n",
-    "                       ['Gain error LH [eV/ADU] = ', '{:0.2E}'.format(gainErr)],\n",
-    "                       ['Energy resolution LH: sigma [eV] = ', '{:0.2f}'.format(eres)],\n",
-    "                       ['Sigma error LH [eV] = ', '{:0.2f}'.format(eres_error)],\n",
-    "                       ['FWHM LH [eV] = ', '{:0.2f}'.format(eres*2.3548)],\n",
-    "                       ['Detector resolution LH: sigma [eV] = ', '{:0.2f}'.format(det_res)]])\n",
-    "\n",
-    "path= output_folder+'results.txt'\n",
-    "if local_output:\n",
-    "    with open(path,'a') as f:\n",
-    "        f.write('\\n')\n",
-    "        np.savetxt(f, output_val, fmt='%s', delimiter=' ', newline='\\n')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Evaluating gain and energy resolution for UH"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "sigma1, sigma1err, peak_pos_adu, peak_pos_errors = Gauss_fit(histCalSinglesUH_cticor, 265., 15., 290., 20., 230., 15., 265., 275., 280., 310., 230., 265.)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "gain, gainErr = lin_fit(peak_pos_adu, peak_pos_eV, peak_pos_errors, m=1, print_level=0)\n",
-    "print(\"gain eV/ADU = %.5f +- %.5f\\n\" %(gain, gainErr))\n",
-    "\n",
-    "#Energy resolution @5.89 keV\n",
-    "#K-alpha sigma/slope of linear fit (result: sigma)\n",
-    "eres =  sigma1/(1./gain) \n",
-    "eres_error = (np.sqrt(sigma1err**2 + gainErr**2))*gain\n",
-    "\n",
-    "print(\"Energy resolution: sigma = %.5f +- %.5f eV\" %(eres, eres_error))\n",
-    "print(\"Energy resolution: FWHM = %.5f eV\\n\" %(eres*2.3548))\n",
-    "\n",
-    "#Detector resolution\n",
-    "F = 0.115\n",
-    "E = 5.89e3\n",
-    "\n",
-    "det_res = eres - np.sqrt(F*E*3.66)\n",
-    "print('Detector resolution: sigma = %.5f eV' %det_res)\n",
-    "print('Detector resolution: FWHM = %.5f eV\\n' %(det_res*2.3548))\n",
-    "\n",
-    "output_val = np.array([['Gain UH [eV/ADU] = ', '{:0.2f}'.format(gain)],\n",
-    "                       ['Gain error UH [eV/ADU] = ', '{:0.2E}'.format(gainErr)],\n",
-    "                       ['Energy resolution UH: sigma [eV] = ', '{:0.2f}'.format(eres)],\n",
-    "                       ['Sigma error UH [eV] = ', '{:0.2f}'.format(eres_error)],\n",
-    "                       ['FWHM UH [eV] = ', '{:0.2f}'.format(eres*2.3548)],\n",
-    "                       ['Detector resolution UH: sigma [eV] = ', '{:0.2f}'.format(det_res)]])\n",
-    "\n",
-    "path= output_folder+'results.txt'\n",
-    "if local_output:\n",
-    "    with open(path,'a') as f:\n",
-    "        f.write('\\n')\n",
-    "        np.savetxt(f, output_val, fmt='%s', delimiter=' ', newline='\\n')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
-    "from iCalibrationDB.detectors import DetectorTypes"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "## relative gain\n",
-    "\n",
-    "metadata = ConstantMetaData()\n",
-    "relgain = Constants.CCD(DetectorTypes.fastCCD).RelativeGain()\n",
-    "relgain.data = relgainMap.data\n",
-    "metadata.calibration_constant = relgain\n",
-    "\n",
-    "# set the operating condition\n",
-    "condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
-    "                                       photon_energy=5.9,\n",
-    "                                       integration_time=integration_time,\n",
-    "                                       gain_setting=gain_setting)\n",
-    "device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "metadata.detector_condition = condition\n",
-    "\n",
-    "# specify the a version for this constant\n",
-    "metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "metadata.send(cal_db_interface)\n",
-    "\n",
-    "\n",
-    "## CTI gain\n",
-    "\n",
-    "metadata = ConstantMetaData()\n",
-    "cti = Constants.CCD(DetectorTypes.fastCCD).CTI()\n",
-    "cti.data = ctiMap.data\n",
-    "metadata.calibration_constant = cti\n",
-    "\n",
-    "# set the operating condition\n",
-    "condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
-    "                                       photon_energy=5.9,\n",
-    "                                       integration_time=integration_time,\n",
-    "                                       gain_setting=gain_setting)\n",
-    "device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "metadata.detector_condition = condition\n",
-    "\n",
-    "# specify the a version for this constant\n",
-    "metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "metadata.send(cal_db_interface)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.6"
-  },
-  "latex_envs": {
-   "LaTeX_envs_menu_present": true,
-   "autocomplete": true,
-   "bibliofile": "biblio.bib",
-   "cite_by": "apalike",
-   "current_citInitial": 1,
-   "eqLabelWithNumbers": true,
-   "eqNumInitial": 1,
-   "hotkeys": {
-    "equation": "Ctrl-E",
-    "itemize": "Ctrl-I"
-   },
-   "labels_anchors": false,
-   "latex_user_defs": false,
-   "report_style_numbering": false,
-   "user_envs_cfg": false
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
diff --git a/notebooks/FastCCD/Characterize_FlatFields_NewDAQ_FastCCD_NBC.ipynb b/notebooks/FastCCD/Characterize_FlatFields_NewDAQ_FastCCD_NBC.ipynb
deleted file mode 100644
index d6094c5ab..000000000
--- a/notebooks/FastCCD/Characterize_FlatFields_NewDAQ_FastCCD_NBC.ipynb
+++ /dev/null
@@ -1,1542 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# FastCCD Flat-Field Characterization ##\n",
-    "\n",
-    "Author: I. Klačková, Version 1.0\n",
-    "\n",
-    "The following notebook provides flat-field image analysis of the FastCCD detector.\n",
-    "\n",
-    "As a prerequisite, dark maps are required for the analysis to be performed. These can be obtained by running **FastCCD Dark Characterization**.\n",
-    "\n",
-    "Flat-field characterization provides analysis of a detector gain and other CCD-related effects. "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "in_folder = \"/gpfs/exfel/data/user/klackova/FCCDdata/test\"\n",
-    "out_folder = '/gpfs/exfel/data/user/klackova/FCCDdata/test/' #output folder\n",
-    "dark_maps = '/gpfs/exfel/data/user/klackova/FCCDdata/test/DarkMaps.h5' #file containing dark maps\n",
-    "#files with flat-field images\n",
-    "path_template = 'RAW-R{:04d}-FASTCCD00-S000{{:02d}}.h5'\n",
-    "run = 779\n",
-    "h5path = '/INSTRUMENT/fastCCDDAQ:daqOutput/data/image/pixels'\n",
-    "number_flatField_frames = 17\n",
-    "cluster_profile = \"noDB\" #ipcluster profile to use\n",
-    "operation_mode = \"FF\" #or \"FF\". FS stands for frame-store and FF for full-frame opeartion\n",
-    "split_evt_primary_threshold = 7. # primary threshold for split event classification in terms of n sigma noise\n",
-    "split_evt_secondary_threshold = 4. # secondary threshold for split event classification in terms of n sigma noise\n",
-    "split_evt_mip_threshold = 1000. # MIP threshold for event classification\n",
-    "tag_101_LH = 0 #if 1 tagging first singles for upper hemisphere, if 0 then are not evaluated\n",
-    "tag_101_UH = 0 #if 2 tagging first singles for upper hemisphere, if 0 then are not evaluated\n",
-    "cti_Axis_LH = 1 #CTI axis index for lower hemisphere\n",
-    "cti_Axis_UH = 0 #CTI axis indes for upper hemisphere\n",
-    "cti_lim_low = 220 #lower limit in ADU for events used in CTI calculator\n",
-    "cti_lim_high = 350 #upper limit in ADU for events used in CTI calculator"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import XFELDetAna.xfelprofiler as xprof\n",
-    "\n",
-    "profiler = xprof.Profiler()\n",
-    "profiler.disable()\n",
-    "from XFELDetAna.util import env\n",
-    "env.iprofile = cluster_profile\n",
-    "\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
-    "from XFELDetAna import xfelpycaltools as xcal\n",
-    "from XFELDetAna import xfelpyanatools as xana\n",
-    "from XFELDetAna.plotting.util import prettyPlotting\n",
-    "prettyPlotting=True\n",
-    "from XFELDetAna.xfelreaders import ChunkReader, fastccdreaderh5\n",
-    "\n",
-    "import numpy as np\n",
-    "import h5py\n",
-    "import matplotlib.pyplot as plt\n",
-    "from iminuit import Minuit\n",
-    "\n",
-    "import time\n",
-    "import copy\n",
-    "\n",
-    "from prettytable import PrettyTable\n",
-    "\n",
-    "%matplotlib inline\n",
-    "\n",
-    "def nImagesOrLimit(nImages, limit):\n",
-    "    if limit == 0:\n",
-    "        return nImages\n",
-    "    else:\n",
-    "        return min(nImages, limit)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "if operation_mode == \"FS\":\n",
-    "    x = 960 # rows of the FastCCD to analyze in FS mode \n",
-    "    y = 960 # columns of the FastCCD to analyze in FS mode \n",
-    "    print('\\nYou are analyzing data in FS mode.')\n",
-    "else:\n",
-    "    x = 1920 # rows of the FastCCD to analyze in FF mode \n",
-    "    y = 960 # columns of the FastCCD to analyze in FF mode\n",
-    "    print('\\nYou are analyzing data in FF mode.')\n",
-    "    \n",
-    "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n",
-    "fp_name = path_template.format(run)\n",
-    "fp_path = '{}/{}'.format(ped_dir, fp_name)\n",
-    "\n",
-    "print(\"Reading data from: {}\\n\".format(fp_path))\n",
-    "print(\"Run is: {}\".format(run))\n",
-    "print(\"HDF5 path: {}\".format(h5path))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "filename = fp_path.format(3)\n",
-    "sensorSize = [x, y]\n",
-    "chunkSize = 100 #Number of images to read per chunk\n",
-    "blockSize = [sensorSize[0]//4, sensorSize[1]//4] #Sensor area will be analysed according to blocksize\n",
-    "xcal.defaultBlockSize = blockSize\n",
-    "cpuCores = 8 #Specifies the number of running cpu cores\n",
-    "memoryCells = 1 #FastCCD has 1 memory cell\n",
-    "#Specifies total number of images to proceed\n",
-    "nImages = fastccdreaderh5.getDataSize(filename, h5path)[2] \n",
-    "nImages = nImagesOrLimit(nImages, number_flatField_frames)\n",
-    "print(\"\\nNumber of flat-field images to analyze: \",nImages)\n",
-    "\n",
-    "commonModeBlockSize = blockSize\n",
-    "commonModeAxisR = 'row'#Axis along which common mode will be calculated\n",
-    "run_parallel = True\n",
-    "profile = False\n",
-    "\n",
-    "with h5py.File(filename, 'r') as f:\n",
-    "    bias_voltage = int(f['/RUN/fastCCDControl/biasclock/bias/value'][0])\n",
-    "    det_gain = int(f['/RUN/fastCCDControl/exposure/gain/value'][0])\n",
-    "    print(\"Bias voltage is {} V\".format(bias_voltage))\n",
-    "    print(\"Detector gain is set to x{}\".format(det_gain))\n",
-    "\n",
-    "reader = ChunkReader(filename, fastccdreaderh5.readData, \n",
-    "                     nImages, chunkSize, \n",
-    "                     path = h5path, \n",
-    "                     pixels_x = x,\n",
-    "                     pixels_y = y)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As a first step, dark maps have to be loaded."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "f = h5py.File(dark_maps, 'r')\n",
-    "offsetMap = np.array(f[\"/offsetMap\"])\n",
-    "noiseMap = np.array(f[\"/noiseMap\"])\n",
-    "badPixelMap = np.array(f[\"/badPixelMap\"])\n",
-    "f.close()\n",
-    "\n",
-    "offsetMap = np.ma.MaskedArray(offsetMap, badPixelMap)\n",
-    "noiseMap = np.ma.MaskedArray(noiseMap, badPixelMap)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "#************************Calculators************************#\n",
-    "\n",
-    "offsetCorrection = xcal.OffsetCorrection([x, y], \n",
-    "                                          offsetMap, \n",
-    "                                          nCells = memoryCells, \n",
-    "                                          cores=cpuCores, gains=None,\n",
-    "                                        blockSize=blockSize)\n",
-    "\n",
-    "cmCorrection = xcal.CommonModeCorrection([x, y], \n",
-    "                                         commonModeBlockSize, \n",
-    "                                         commonModeAxisR,\n",
-    "                                         nCells = memoryCells, \n",
-    "                                         noiseMap = noiseMap,\n",
-    "                                        runParallel=run_parallel,\n",
-    "                                        stats=True)\n",
-    "\n",
-    "patternClassifierLH = xcal.PatternClassifier([x//2, y], \n",
-    "                                              noiseMap[:x//2, :], \n",
-    "                                              split_evt_primary_threshold, \n",
-    "                                             split_evt_secondary_threshold,\n",
-    "                                             split_evt_mip_threshold,\n",
-    "                                              tagFirstSingles = tag_101_LH, \n",
-    "                                              nCells=memoryCells, \n",
-    "                                              cores=cpuCores, \n",
-    "                                              allowElongated = False,\n",
-    "                                          blockSize=blockSize,\n",
-    "                                            runParallel=run_parallel)\n",
-    "\n",
-    "patternClassifierUH = xcal.PatternClassifier([x//2, y], \n",
-    "                                              noiseMap[x//2:, :], \n",
-    "                                              split_evt_primary_threshold, \n",
-    "                                              split_evt_secondary_threshold,\n",
-    "                                              split_evt_mip_threshold,\n",
-    "                                              tagFirstSingles = tag_101_UH, \n",
-    "                                              nCells=memoryCells, \n",
-    "                                              cores=cpuCores, \n",
-    "                                              allowElongated = False,\n",
-    "                                          blockSize=blockSize,\n",
-    "                                            runParallel=run_parallel)\n",
-    "\n",
-    "patternSelectorLH = xcal.PatternSelector([x//2, y], \n",
-    "                                          selectionList = [100, 200, 201, 202, 203,\n",
-    "                                                           300, 301, 302, 303,\n",
-    "                                                           400, 401, 402, 403],\n",
-    "                                      blockSize=blockSize, runParallel=run_parallel)\n",
-    "patternSelectorUH = xcal.PatternSelector([x//2, y], \n",
-    "                                          selectionList = [100, 200, 201, 202, 203,\n",
-    "                                                          300, 301, 302, 303,\n",
-    "                                                           400, 401, 402, 403],\n",
-    "                                      blockSize=blockSize, runParallel=run_parallel)\n",
-    "\n",
-    "ctiCalLH = xcal.CTICalculator([x//2, y], \n",
-    "                               noiseMap[:x//2, :], \n",
-    "                               axis=cti_Axis_LH, \n",
-    "                               nCells=memoryCells,\n",
-    "                               cores=cpuCores,\n",
-    "                               absLow=cti_lim_low, absHigh=cti_lim_high, gains=None,\n",
-    "                           blockSize=blockSize, runParallel=run_parallel)\n",
-    "\n",
-    "ctiCalUH = xcal.CTICalculator([x//2, y], \n",
-    "                               noiseMap[x//2:, :], \n",
-    "                               axis=cti_Axis_UH, \n",
-    "                               nCells=memoryCells,\n",
-    "                               cores=cpuCores,\n",
-    "                               absLow=cti_lim_low, absHigh=cti_lim_high, gains=None,\n",
-    "                           blockSize=blockSize, runParallel=run_parallel)\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "#*********Setting bad pixel mask***************#\n",
-    "\n",
-    "patternClassifierLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternClassifierUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "patternSelectorLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternSelectorUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "ctiCalLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "ctiCalUH.setBadPixelMask(badPixelMap[x//2:, :])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "#*****************Histogram Calculators******************#\n",
-    "\n",
-    "histCalOffsetCor = xcal.HistogramCalculator([x, y], \n",
-    "                                            bins=1000, \n",
-    "                                            range=[0, 1000],\n",
-    "                                            nCells=memoryCells, \n",
-    "                                            cores=cpuCores,\n",
-    "                                           blockSize=blockSize)\n",
-    "\n",
-    "histCalCMCor = xcal.HistogramCalculator([x, y], bins=1000, \n",
-    "                                        range=[0, 1000], \n",
-    "                                        cores=cpuCores,\n",
-    "                                        memoryCells=memoryCells)\n",
-    "\n",
-    "histCalPatternCorLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                                bins=1000, \n",
-    "                                                range=[0, 1000],\n",
-    "                                                nCells=memoryCells, \n",
-    "                                                cores=cpuCores,\n",
-    "                                            blockSize=blockSize)\n",
-    "\n",
-    "histCalPatternCorUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                                bins=1000, \n",
-    "                                                range=[0, 1000],\n",
-    "                                                nCells=memoryCells, \n",
-    "                                                cores=cpuCores,\n",
-    "                                            blockSize=blockSize)\n",
-    "\n",
-    "histCalSinglesLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalSinglesUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalDoublesLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalDoublesUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalTriplesLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "histCalTriplesUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "\n",
-    "histCalQuadsLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "histCalQuadsUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The very first step when analyzing illuminated data is correction for the dark effects (offset, common mode).  \n",
-    "\n",
-    "The next step is correction for charge sharing effects between neighbouring pixels. PatternClassifier performs classification according to event patterns with different multiplicities, defined as the number of pattern consituents. Valid patterns can range from one to four constituents. For the classification, event thresholds are used. There are three types of thresholds: a *primary threshold*, a *secondary threshold* and an *upper threshold*. \n",
-    "\n",
-    "Primary and secondary thresholds are given in $n\\times\\sigma$, where $\\sigma$ is the noise value in the pixel. First, the values in all pixels are checked against primary threshold. Values exceeding $prim.threshold\\times\\sigma$ are considered as events. Adjacent piexls (to pixel contaning photon event) are checked against secondary threshold, if they exceed value $sec.threshold\\times\\sigma$ they are considered one pattern. The upper event threhsold corresponds to a summed value of all pattern constituents. If the summed value is below the upper threshold, the event pattern is considered valid. \n",
-    "\n",
-    "Next option is tracking first single events, which are used for CTI analysis. These are single pixel events nearest to the readout node. As there are readout ports on the both sides of the sensor, lower and upper halves (hemispheres) are analysed separately. For the FastCCD two options are used for tracking: for the lower half $tagFirstSingles = 1$ and for the upper half $tagFirstSingles = 2$. If value is set to 0, first singles are not evaluated.\n",
-    "\n",
-    "PatternClassifier returns information about the summed energy (ADU) of all pattern consituents and about multiplicity of each pattern.\n",
-    "\n",
-    "PatternSelector is used to select only patterns of interest based on pattern indices.\n",
-    "\n",
-    "To evaluate CTI (Charge Transfer Inefficiency) and relative gain effects, CTICalculator is used. Calculator needs  axis along which it should be calculated (axis=1 for columns) and lower and upper limit in ADU to select only events around the expcted peak values."
-   ]
-  },
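-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The following is a toy, self-contained sketch of the split-event threshold logic described above. It is an illustration only, not the xcal.PatternClassifier implementation; the 3x3 frame, the flat noise level and the pixel values are made up, and neither adjacency nor the upper (MIP) threshold is enforced."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "# Toy illustration of the primary/secondary split-event thresholds\n",
-    "# (illustration only, not the xcal.PatternClassifier implementation).\n",
-    "import numpy as np\n",
-    "\n",
-    "noise = np.full((3, 3), 5.0)        # per-pixel noise sigma in ADU (assumed)\n",
-    "frame = np.array([[ 2.,  3.,  1.],\n",
-    "                  [ 4., 60., 25.],  # a photon event sharing charge to the right\n",
-    "                  [ 1.,  2.,  3.]])\n",
-    "\n",
-    "prim, sec = 7., 4.                  # n-sigma thresholds, as in the parameters above\n",
-    "\n",
-    "seeds = frame > prim * noise        # pixels counted as primary events\n",
-    "constituents = frame > sec * noise  # pixels eligible as pattern constituents\n",
-    "\n",
-    "print(\"seed pixels:\", np.argwhere(seeds).tolist())\n",
-    "print(\"summed pattern energy (ADU):\", frame[constituents].sum())"
-   ]
-  },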
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "for i,data in enumerate(reader.readChunks()):\n",
-    "        \n",
-    "    data = offsetCorrection.correct(data.astype(np.float32)) #correct for the offset        \n",
-    "    histCalOffsetCor.fill(data)\n",
-    "        \n",
-    "    data = cmCorrection.correct(data) #correct for the row common mode\n",
-    "    histCalCMCor.fill(data)   \n",
-    "        \n",
-    "    dataLH = data[:x//2, :, :]\n",
-    "    dataUH = data[x//2:, :, :]\n",
-    "\n",
-    "\n",
-    "    #*************Pattern Classification*************#\n",
-    "    dataLH, patternsLH = patternClassifierLH.classify(dataLH)\n",
-    "    dataUH, patternsUH = patternClassifierUH.classify(dataUH)\n",
-    "        \n",
-    "    histCalPatternCorLH.fill(dataLH)\n",
-    "    histCalPatternCorUH.fill(dataUH)\n",
-    "\n",
-    "    #*************Pattern Selection*************#\n",
-    "\n",
-    "    singlesLH, d0LH, d1LH, d2LH, d3LH, t0LH, t1LH, t2LH, t3LH, q0LH, q1LH, q2LH, q3LH = patternSelectorLH.select(dataLH, patternsLH)\n",
-    "        \n",
-    "    histCalSinglesLH.fill(singlesLH)\n",
-    "        \n",
-    "    histCalDoublesLH.fill(d0LH)\n",
-    "    histCalDoublesLH.fill(d1LH)\n",
-    "    histCalDoublesLH.fill(d2LH)\n",
-    "    histCalDoublesLH.fill(d3LH)\n",
-    "    \n",
-    "    histCalTriplesLH.fill(t0LH)\n",
-    "    histCalTriplesLH.fill(t1LH)\n",
-    "    histCalTriplesLH.fill(t2LH)\n",
-    "    histCalTriplesLH.fill(t3LH)\n",
-    "    \n",
-    "    histCalQuadsLH.fill(q0LH)\n",
-    "    histCalQuadsLH.fill(q1LH)\n",
-    "    histCalQuadsLH.fill(q2LH)\n",
-    "    histCalQuadsLH.fill(q3LH)\n",
-    "        \n",
-    "    singlesUH, d0UH, d1UH, d2UH, d3UH, t0UH, t1UH, t2UH, t3UH, q0UH, q1UH, q2UH, q3UH = patternSelectorUH.select(dataUH, patternsUH)\n",
-    "        \n",
-    "    histCalSinglesUH.fill(singlesUH)\n",
-    "    \n",
-    "    histCalDoublesUH.fill(d0UH)\n",
-    "    histCalDoublesUH.fill(d1UH)\n",
-    "    histCalDoublesUH.fill(d2UH)\n",
-    "    histCalDoublesUH.fill(d3UH)\n",
-    "    \n",
-    "    histCalTriplesUH.fill(t0UH)\n",
-    "    histCalTriplesUH.fill(t1UH)\n",
-    "    histCalTriplesUH.fill(t2UH)\n",
-    "    histCalTriplesUH.fill(t3UH)\n",
-    "    \n",
-    "    histCalQuadsUH.fill(q0UH)\n",
-    "    histCalQuadsUH.fill(q1UH)\n",
-    "    histCalQuadsUH.fill(q2UH)\n",
-    "    histCalQuadsUH.fill(q3UH)  \n",
-    " \n",
-    "    #*************CTI*************#\n",
-    "\n",
-    "    ctiCalLH.fill(singlesLH)\n",
-    "    ctiCalUH.fill(singlesUH)\n",
-    "\n",
-    "    print(\"Processed {} images.\".format((i+1)*chunkSize))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "print(\"******************LOWER HEMISPHERE******************\\n\")\n",
-    "\n",
-    "patternStatsLH = patternClassifierLH.getPatternStats()\n",
-    "fig = plt.figure(figsize=(15,15))\n",
-    "ax = fig.add_subplot(4,4,1)\n",
-    "sfields = [\"singles\", \"first singles\", \"clusters\"]\n",
-    "mfields = [\"doubles\", \"triples\", \"quads\"]\n",
-    "relativeOccurances =  []\n",
-    "labels = []\n",
-    "for i, f in enumerate(sfields):\n",
-    "    relativeOccurances.append(patternStatsLH[f])\n",
-    "    labels.append(f)\n",
-    "for i, f in enumerate(mfields):\n",
-    "    for k in range(len(patternStatsLH[f])):\n",
-    "        relativeOccurances.append(patternStatsLH[f][k])\n",
-    "        labels.append(f+\"(\"+str(k)+\")\")\n",
-    "relativeOccurances = np.array(relativeOccurances, np.float)\n",
-    "relativeOccurances/=np.sum(relativeOccurances)\n",
-    "pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)\n",
-    "ax.set_title(\"Pattern occurrence\")\n",
-    "# Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "a = ax.axis('equal')\n",
-    "\n",
-    "smaps = [\"singlemap\", \"firstsinglemap\", \"clustermap\"]\n",
-    "for i, m in enumerate(smaps):\n",
-    "\n",
-    "    ax = fig.add_subplot(4,4,2+i)\n",
-    "\n",
-    "    pmap = ax.imshow(patternStatsLH[m], interpolation=\"nearest\", vmax=2*np.nanmedian(patternStatsLH[m]))\n",
-    "    ax.set_title(m)\n",
-    "    cb = fig.colorbar(pmap)\n",
-    "\n",
-    "mmaps = [\"doublemap\", \"triplemap\", \"quadmap\"]\n",
-    "k = 0\n",
-    "for i, m in enumerate(mmaps):\n",
-    "\n",
-    "    for j in range(4):\n",
-    "        ax = fig.add_subplot(4,4,2+len(smaps)+k)\n",
-    "        pmap = ax.imshow(patternStatsLH[m][j], interpolation=\"nearest\", vmax=2*np.median(patternStatsLH[m][j]))\n",
-    "        ax.set_title(m+\"(\"+str(j)+\")\")\n",
-    "        cb = fig.colorbar(pmap)\n",
-    "        k+=1"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "patternStatsUH = patternClassifierUH.getPatternStats()\n",
-    "fig = plt.figure(figsize=(15,15))\n",
-    "ax = fig.add_subplot(4,4,1)\n",
-    "sfields = [\"singles\", \"first singles\", \"clusters\"]\n",
-    "mfields = [\"doubles\", \"triples\", \"quads\"]\n",
-    "relativeOccurances =  []\n",
-    "labels = []\n",
-    "for i, f in enumerate(sfields):\n",
-    "    relativeOccurances.append(patternStatsUH[f])\n",
-    "    labels.append(f)\n",
-    "for i, f in enumerate(mfields):\n",
-    "    for k in range(len(patternStatsUH[f])):\n",
-    "        relativeOccurances.append(patternStatsUH[f][k])\n",
-    "        labels.append(f+\"(\"+str(k)+\")\")\n",
-    "relativeOccurances = np.array(relativeOccurances, np.float)\n",
-    "relativeOccurances/=np.sum(relativeOccurances)\n",
-    "pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)\n",
-    "ax.set_title(\"Pattern occurrence\")\n",
-    "# Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "a = ax.axis('equal')\n",
-    "\n",
-    "smaps = [\"singlemap\", \"firstsinglemap\", \"clustermap\"]\n",
-    "for i, m in enumerate(smaps):\n",
-    "\n",
-    "    ax = fig.add_subplot(4,4,2+i)\n",
-    "\n",
-    "    pmap = ax.imshow(patternStatsUH[m], interpolation=\"nearest\", vmax=2*np.nanmedian(patternStatsUH[m]))\n",
-    "    ax.set_title(m)\n",
-    "    cb = fig.colorbar(pmap)\n",
-    "\n",
-    "mmaps = [\"doublemap\", \"triplemap\", \"quadmap\"]\n",
-    "k = 0\n",
-    "for i, m in enumerate(mmaps):\n",
-    "\n",
-    "    for j in range(4):\n",
-    "        ax = fig.add_subplot(4,4,2+len(smaps)+k)\n",
-    "        pmap = ax.imshow(patternStatsUH[m][j], interpolation=\"nearest\", vmax=np.median(patternStatsUH[m][j]))\n",
-    "        ax.set_title(m+\"(\"+str(j)+\")\")\n",
-    "        cb = fig.colorbar(pmap)\n",
-    "        k+=1\n",
-    "        \n",
-    "print(\"******************UPPER HEMISPHERE******************\\n\")        "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "t0 = PrettyTable()\n",
-    "t0.title = \"Total number of Counts\"\n",
-    "t0.field_names = [\"Hemisphere\",\"Singles\", \"First-singles\", \"Clusters\"]\n",
-    "t0.add_row([\"LH\", patternStatsLH['singles'], patternStatsLH['first singles'], patternStatsLH['clusters']])\n",
-    "t0.add_row([\"UH\", patternStatsUH['singles'], patternStatsUH['first singles'], patternStatsUH['clusters']])\n",
-    "\n",
-    "print(t0)\n",
-    "\n",
-    "t1 = PrettyTable()\n",
-    "\n",
-    "t1.field_names = [\"Index\",\"D-LH\", \"D-UH\", \"T-LH\", \"T-UH\", \"Q-LH\", \"Q-UH\"]\n",
-    "\n",
-    "t1.add_row([0, patternStatsLH['doubles'][0], patternStatsUH['doubles'][0], patternStatsLH['triples'][0], patternStatsUH['triples'][0], patternStatsLH['quads'][0], patternStatsUH['quads'][0]])\n",
-    "t1.add_row([1, patternStatsLH['doubles'][1], patternStatsUH['doubles'][1], patternStatsLH['triples'][1], patternStatsUH['triples'][1], patternStatsLH['quads'][1], patternStatsUH['quads'][1]])\n",
-    "t1.add_row([2, patternStatsLH['doubles'][2], patternStatsUH['doubles'][2], patternStatsLH['triples'][2], patternStatsUH['triples'][2], patternStatsLH['quads'][2], patternStatsUH['quads'][2]])\n",
-    "t1.add_row([3, patternStatsLH['doubles'][3], patternStatsUH['doubles'][3], patternStatsLH['triples'][3], patternStatsUH['triples'][3], patternStatsLH['quads'][3], patternStatsUH['quads'][3]])\n",
-    "\n",
-    "print(t1)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "doublesLH = patternStatsLH['doubles'][0] + patternStatsLH['doubles'][1] + patternStatsLH['doubles'][2] + patternStatsLH['doubles'][3]\n",
-    "triplesLH = patternStatsLH['triples'][0] + patternStatsLH['triples'][1] + patternStatsLH['triples'][2] + patternStatsLH['triples'][3]\n",
-    "quadsLH = patternStatsLH['quads'][0] + patternStatsLH['quads'][1] + patternStatsLH['quads'][2] + patternStatsLH['quads'][3]\n",
-    "allsinglesLH = patternStatsLH['singles'] + patternStatsLH['first singles']\n",
-    "eventsLH = allsinglesLH + doublesLH + triplesLH + quadsLH\n",
-    "\n",
-    "doublesUH = patternStatsUH['doubles'][0] + patternStatsUH['doubles'][1] + patternStatsUH['doubles'][2] + patternStatsUH['doubles'][3]\n",
-    "triplesUH = patternStatsUH['triples'][0] + patternStatsUH['triples'][1] + patternStatsUH['triples'][2] + patternStatsUH['triples'][3]\n",
-    "quadsUH = patternStatsUH['quads'][0] + patternStatsUH['quads'][1] + patternStatsUH['quads'][2] + patternStatsUH['quads'][3]\n",
-    "allsinglesUH = patternStatsUH['singles'] + patternStatsUH['first singles']\n",
-    "eventsUH = allsinglesUH + doublesUH + triplesUH + quadsUH\n",
-    "\n",
-    "reloccurLH = np.array([allsinglesLH/eventsLH, doublesLH/eventsLH, triplesLH/eventsLH, quadsLH/eventsLH])\n",
-    "reloccurUH = np.array([allsinglesUH/eventsUH, doublesUH/eventsUH, triplesUH/eventsUH, quadsUH/eventsUH])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "fig = plt.figure(figsize=(10,5))\n",
-    "ax = fig.add_subplot(1,2,1)\n",
-    "labels = ['singles', 'doubles', 'triples', 'quads']\n",
-    "pie = ax.pie(reloccurLH, labels=labels, autopct='%1.1f%%', shadow=True)\n",
-    "ax.set_title(\"Pattern occurrence LH\")\n",
-    "# Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "a = ax.axis('equal')\n",
-    "ax = fig.add_subplot(1,2,2)\n",
-    "pie = ax.pie(reloccurUH, labels=labels, autopct='%1.1f%%', shadow=True)\n",
-    "ax.set_title(\"Pattern occurrence UH\")\n",
-    "# Set aspect ratio to be equal so that pie is drawn as a circle.\n",
-    "a = ax.axis('equal')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "h1,e1,c1,s1 = histCalSinglesLH.get()\n",
-    "h,e,c,s = histCalPatternCorLH.get()\n",
-    "h2,e2,c2,s2 = histCalDoublesLH.get ()\n",
-    "h3,e3,c3,s3 = histCalTriplesLH.get ()\n",
-    "h4,e4,c4,s4 = histCalQuadsLH.get ()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'all events'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'singles'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'doubles'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'triples'},\n",
-    "     {'x': c4,\n",
-    "      'y': h4,\n",
-    "      'y_err': np.sqrt(h4[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'quads'},\n",
-    "     \n",
-    "    ]\n",
-    " \n",
-    "h=np.ma.getdata(h)\n",
-    "np.savez(out_folder+'LH_all_events.npz', c,h)\n",
-    "\n",
-    "h1=np.ma.getdata(h1)\n",
-    "np.savez(out_folder+'LH_singles.npz', c1,h1)\n",
-    "\n",
-    "h2=np.ma.getdata(h2)\n",
-    "np.savez(out_folder+'LH_doubles.npz', c2,h2) \n",
-    "\n",
-    "h3=np.ma.getdata(h3)\n",
-    "np.savez(out_folder+'LH_triples.npz', c3,h3)\n",
-    "\n",
-    "h4=np.ma.getdata(h4)\n",
-    "np.savez(out_folder+'LH_quads.npz', c4,h4) \n",
-    "    \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU) - LH', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')\n",
-    "\n",
-    "h1,e1,c1,s1 = histCalSinglesUH.get()\n",
-    "h,e,c,s = histCalPatternCorUH.get()\n",
-    "h2,e2,c2,s2 = histCalDoublesUH.get()\n",
-    "h3,e3,c3,s3 = histCalTriplesLH.get ()\n",
-    "h4,e4,c4,s4 = histCalQuadsLH.get ()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'all events'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'singles'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'doubles'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'triples'},\n",
-    "     {'x': c4,\n",
-    "      'y': h4,\n",
-    "      'y_err': np.sqrt(h4[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'quads'},\n",
-    "    ]\n",
-    "     \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU) - UH', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')\n",
-    "\n",
-    "h=np.ma.getdata(h)\n",
-    "np.savez(out_folder+'UH_all_events.npz', c,h)\n",
-    "\n",
-    "h1=np.ma.getdata(h1)\n",
-    "np.savez(out_folder+'UH_singles.npz', c1,h1)\n",
-    "\n",
-    "h2=np.ma.getdata(h2)\n",
-    "np.savez(out_folder+'UH_doubles.npz', c2,h2) \n",
-    "\n",
-    "h3=np.ma.getdata(h3)\n",
-    "np.savez(out_folder+'UH_triples.npz', c3,h3)\n",
-    "\n",
-    "h4=np.ma.getdata(h4)\n",
-    "np.savez(out_folder+'UH_quads.npz', c4,h4) \n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### CTI and Relative Gain Corrections "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In the second iteration data are corrected for CTI and relative gain effects.\n",
-    "\n",
-    "When CTI is evaluated selected single events are plotted against their position in a row, for each column separately. Plotted points are fitted by linear function,$$y=m\\times x+b,$$\n",
-    "    where y-intercept is the expected value of the event and CTI is given as $$CTI=\\frac{m}{b}.$$\n",
-    "Relative gain is a relative difference in the amplification of charge due to different output amplifiers. It is calculated from the $b$ value of the linear fit as $$relative\\:gain=\\frac{b(i)}{\\frac{1}{N}\\sum_{0}^{N}b(i)}.$$"
-   ]
-  },
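-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Below is a minimal numerical sketch of this per-column fit on synthetic single-event data. It is an illustration only, not the xcal.CTICalculator implementation; the peak value, CTI, noise level and the intercepts of the other columns are assumed."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "# Minimal sketch of the per-column CTI / relative-gain extraction on\n",
-    "# synthetic single-event data (illustration only, not xcal.CTICalculator).\n",
-    "import numpy as np\n",
-    "\n",
-    "np.random.seed(0)\n",
-    "rows = np.arange(480)                        # charge shifts to the readout node\n",
-    "b_true, cti_true = 300., 5e-5                # assumed peak value (ADU) and CTI\n",
-    "signal = b_true * (1. - cti_true * rows) + np.random.normal(0., 2., rows.size)\n",
-    "\n",
-    "m, b = np.polyfit(rows, signal, 1)           # linear fit y = m*x + b\n",
-    "cti = m / b                                  # CTI as defined above; negative here\n",
-    "                                             # because the signal decreases per shift\n",
-    "print(\"fitted |CTI| = {:.2e}\".format(abs(cti)))\n",
-    "\n",
-    "# relative gain of this column w.r.t. the mean intercept of all columns\n",
-    "intercepts = np.array([b, 298., 301., 299.]) # made-up intercepts of other columns\n",
-    "print(\"relative gains:\", np.round(intercepts / intercepts.mean(), 4))"
-   ]
-  },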
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "ctiLH, ctierrLH, relGainLH, relGainErrLH, chi2LH, pvalLH, fig = ctiCalLH.get(\n",
-    "                                              plot=False, axis=cti_Axis_LH)\n",
-    "\n",
-    "ctiUH, ctierrUH, relGainUH, relGainErrUH, chi2UH, pvalUH, fig = ctiCalUH.get(\n",
-    "                                              plot=False, axis=cti_Axis_UH)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "#Number of columns for which CTI fit did not converge:\n",
-    "ncc=np.count_nonzero(np.isnan(ctiLH))\n",
-    "print('Number of non-converging columns LH: {:0.0f}'.format(ncc))\n",
-    "\n",
-    "ncc=np.count_nonzero(np.isnan(ctiUH))\n",
-    "print('number of non-converging columns UH: {:0.0f}'.format(ncc))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "np.savez(out_folder+'ctiLH.npz', ctiLH,ctierrLH)\n",
-    "np.savez(out_folder+'ctiUH.npz', ctiUH,ctierrUH)\n",
-    "\n",
-    "np.savez(out_folder+'relGainLH.npz', relGainLH,relGainErrLH)\n",
-    "np.savez(out_folder+'relGainUH.npz', relGainUH,relGainErrUH)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "ctiCorrectionLH = xcal.CTICorrection([x//2, y], \n",
-    "                                   ctiLH, axis = cti_Axis_LH, cores=cpuCores,\n",
-    "                                   nCells = memoryCells, gains=None,\n",
-    "                                  blockSize=blockSize, runParallel=run_parallel) \n",
-    "                                   \n",
-    "relGainCorrLH = xcal.RelativeGainCorrection([x//2, y], \n",
-    "                                          relGainLH, axis = cti_Axis_LH, \n",
-    "                                          cores=cpuCores,\n",
-    "                                          nCells = memoryCells, gains=None,\n",
-    "                                         blockSize=blockSize, runParallel=run_parallel)\n",
-    "\n",
-    "ctiCorrectionUH = xcal.CTICorrection([x//2, y], \n",
-    "                                   ctiUH, axis = cti_Axis_UH, cores=cpuCores,\n",
-    "                                   nCells = memoryCells, gains=None,\n",
-    "                                  blockSize=blockSize, runParallel=run_parallel) \n",
-    "                                   \n",
-    "relGainCorrUH = xcal.RelativeGainCorrection([x//2, y], \n",
-    "                                          relGainUH, axis = cti_Axis_UH, \n",
-    "                                          cores=cpuCores,\n",
-    "                                          nCells = memoryCells, gains=None,\n",
-    "                                         blockSize=blockSize, runParallel=run_parallel)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Relative Gain"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "mask = relGainLH == 0\n",
-    "relGainMskdLH = np.ma.MaskedArray(relGainLH, mask)\n",
-    "\n",
-    "columns = np.arange(0,y)\n",
-    "\n",
-    "d1 = {'x': columns,\n",
-    "      'y': relGainMskdLH,\n",
-    "      'drawstyle': 'marker-only',\n",
-    "      'marker': '.'\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(d1, figsize='1col', \n",
-    "                    aspect=2, x_label = 'Columns', \n",
-    "                    y_label=\"Relative gain - LH\", \n",
-    "                    y_log=False, x_range=(0,y),)\n",
-    "\n",
-    "mask = relGainUH == 0\n",
-    "relGainMskdUH = np.ma.MaskedArray(relGainUH, mask)\n",
-    "\n",
-    "d1 = {'x': columns,\n",
-    "      'y': relGainMskdUH,\n",
-    "      'drawstyle': 'marker-only',\n",
-    "      'marker': '.'\n",
-    "     }\n",
-    "\n",
-    "fig = xana.simplePlot(d1, figsize='1col', \n",
-    "                    aspect=2, x_label = 'Columns', \n",
-    "                    y_label=\"Relative gain - UH\", \n",
-    "                    y_log=False, x_range=(0,y),)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### CTI"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib as mpl\n",
-    "mpl.matplotlib_fname()\n",
-    "mpl.rc('text', usetex=True)\n",
-    "\n",
-    "d = {'x': columns,\n",
-    "     'y': ctiLH/10e-5,\n",
-    "     #'y_err': ctierr,\n",
-    "     'drawstyle': 'marker-only',\n",
-    "     'marker': '.',\n",
-    "     'color': 'crimson'\n",
-    "     }\n",
-    "\n",
-    "\n",
-    "fig = xana.simplePlot(d, figsize='1col', \n",
-    "                    aspect=2, x_label = 'Columns', \n",
-    "                   y_label=r'$\\displaystyle\\mathrm{\\frac{CTI}{10^{-5}}}$ - LH', y_log=True, y_range=(0,np.nanmax(ctiUH)/10e-5))\n",
-    "                          \n",
-    "d = {'x': columns,\n",
-    "     'y': ctiUH/10e-5,\n",
-    "     #'y_err': ctierr,\n",
-    "     'drawstyle': 'marker-only',\n",
-    "      'marker': '.',\n",
-    "     'color': 'crimson'\n",
-    "     }\n",
-    "\n",
-    "\n",
-    "fig = xana.simplePlot(d, figsize='1col', \n",
-    "                    aspect=2, x_label = 'Columns', \n",
-    "                    y_label=r'$\\displaystyle\\mathrm{\\frac{CTI}{10^{-5}}}$ - UH', y_log=True, y_range=(0,np.nanmax(ctiUH)/10e-5))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "mpl.rc('text', usetex=False)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Relative Gain Map with CTI"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "For the CTI correction, map has to be created, as one has to take into account number of charge shifts to the readout node. CTI map is calculated as $$CTI\\:map_{(x,y)}=(1+CTI_{(y)})^x,$$ where $CTI_{(y)}$ is the CTI value gained from calculator and $x$ refers to the rows of the FastCCD. It repesents the number of charge shifts needed to reach a readout node."
-   ]
-  },
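-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "A short broadcasting sketch of this map construction is shown below. It is illustrative only: in this notebook the maps are taken from the correctionMap attribute of the CTI correction calculators, and the per-column CTI values in the sketch are assumed."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "# Sketch: build a CTI map as (1 + CTI_(y))^x via NumPy broadcasting.\n",
-    "# Assumptions: a 960x960 hemisphere, a flat 5e-5 CTI per column, and row\n",
-    "# index x counted as the number of shifts towards the readout node.\n",
-    "import numpy as np\n",
-    "\n",
-    "n_rows, n_cols = 960, 960\n",
-    "cti_col = np.full(n_cols, 5e-5)              # per-column CTI (assumed values)\n",
-    "shifts = np.arange(n_rows)[:, None]          # x: charge shifts to the readout node\n",
-    "\n",
-    "cti_map = (1. + cti_col[None, :]) ** shifts  # shape (n_rows, n_cols)\n",
-    "print(cti_map.shape, cti_map[0, 0], cti_map[-1, 0])"
-   ]
-  },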
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ctiMapLH = ctiCorrectionLH.correctionMap\n",
-    "relgainMapLH = relGainCorrLH.correctionMap\n",
-    "\n",
-    "ctiMapUH = ctiCorrectionUH.correctionMap\n",
-    "relgainMapUH = relGainCorrUH.correctionMap\n",
-    "\n",
-    "ctiMap = np.zeros((x,y,1))\n",
-    "ctiMap[:x//2,:] = ctiMapLH\n",
-    "ctiMap[x//2:,:] = ctiMapUH\n",
-    "\n",
-    "relgainMap = np.zeros((x,y,1))\n",
-    "relgainMap[:x//2,:] = relgainMapLH\n",
-    "relgainMap[x//2:,:] = relgainMapUH\n",
-    "\n",
-    "\n",
-    "mapsTest = np.zeros_like(relgainMap)\n",
-    "mapsTest[:x//2,:] = relgainMapLH*ctiMapLH\n",
-    "mapsTest[x//2:,:] = relgainMapUH*ctiMapUH\n",
-    "\n",
-    "fig = xana.heatmapPlot(mapsTest[:,:,0], vmin=0.95, vmax=1.1, x_range=(0,y), y_range=(0,x), \n",
-    "                       lut_label=\"Relative gain map\\nwith CTI\", x_label=\"Columns\", y_label=\"Rows\", figsize='2col')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "valuesLH = [np.ma.std(relGainMskdLH), np.nanstd(ctiLH), np.nanmean(ctiLH), np.nanmax(ctiLH)]\n",
-    "valuesUH = [np.ma.std(relGainMskdUH), np.nanstd(ctiUH), np.nanmean(ctiUH), np.nanmax(ctiUH)]\n",
-    "\n",
-    "t0 = PrettyTable()\n",
-    "t0.field_names = [\"Hemisphere\",\"Rel. gain variation\", \"CTI variation\", \"Avg. CTI\", 'Max. CTI']\n",
-    "t0.add_row([\"LH\", '{:0.2E}'.format(valuesLH[0]), '{:0.2E}'.format(valuesLH[1]), '{:0.2E}'.format(valuesLH[2]), '{:0.2E}'.format(valuesLH[3])])\n",
-    "t0.add_row([\"UH\", '{:0.2E}'.format(valuesUH[0]), '{:0.2E}'.format(valuesUH[1]), '{:0.2E}'.format(valuesUH[2]), '{:0.2E}'.format(valuesUH[3])])\n",
-    "\n",
-    "print(t0)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "cmCorrection.reset()\n",
-    "patternClassifierLH.reset()\n",
-    "patternClassifierUH.reset()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "patternClassifierLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternClassifierUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "patternSelectorLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternSelectorUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "ctiCorrectionLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "ctiCorrectionUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "relGainCorrLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "relGainCorrUH.setBadPixelMask(badPixelMap[x//2:, :])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "histCalCTICorLH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                                bins=1000, \n",
-    "                                                range=[0, 1000],\n",
-    "                                                nCells=memoryCells, \n",
-    "                                                cores=cpuCores,\n",
-    "                                            blockSize=blockSize)\n",
-    "\n",
-    "histCalCTICorUH = xcal.HistogramCalculator([x//2, y], \n",
-    "                                                bins=1000, \n",
-    "                                                range=[0, 1000],\n",
-    "                                                nCells=memoryCells, \n",
-    "                                                cores=cpuCores,\n",
-    "                                            blockSize=blockSize)\n",
-    "\n",
-    "histCalSinglesLH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalSinglesUH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalDoublesLH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalDoublesUH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalTriplesLH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "histCalTriplesUH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "\n",
-    "histCalQuadsLH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)\n",
-    "histCalQuadsUH_cticor = xcal.HistogramCalculator([x//2, y], \n",
-    "                                             bins=1000, \n",
-    "                                             range=[0, 1000],\n",
-    "                                             nCells=memoryCells, \n",
-    "                                             cores=cpuCores)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "for i,data in enumerate(reader.readChunks()):\n",
-    "        \n",
-    "    data = offsetCorrection.correct(data.astype(np.float32)) #correct for the offset                \n",
-    "    data = cmCorrection.correct(data) #correct for the row common mode        \n",
-    "    \n",
-    "    dataLH = data[:x//2, :, :]\n",
-    "    dataUH = data[x//2:, :, :]\n",
-    "\n",
-    "    #*************CTI/Rel.gain Correction*************#\n",
-    "    \n",
-    "    dataLH = ctiCorrectionLH.correct(dataLH)\n",
-    "    dataLH = relGainCorrLH.correct(dataLH)\n",
-    "        \n",
-    "    dataUH = ctiCorrectionUH.correct(dataUH)\n",
-    "    dataUH = relGainCorrUH.correct(dataUH)\n",
-    "\n",
-    "    #*************Pattern Classification*************#\n",
-    "    dataLH, patternsLH = patternClassifierLH.classify(dataLH)\n",
-    "    dataUH, patternsUH = patternClassifierUH.classify(dataUH)\n",
-    "        \n",
-    "    histCalCTICorLH.fill(dataLH)\n",
-    "    histCalCTICorUH.fill(dataUH)\n",
-    "\n",
-    "    #*************Pattern Selection*************#\n",
-    "\n",
-    "    singlesLH, d0LH, d1LH, d2LH, d3LH, t0LH, t1LH, t2LH, t3LH, q0LH, q1LH, q2LH, q3LH = patternSelectorLH.select(dataLH, patternsLH)\n",
-    "        \n",
-    "    histCalSinglesLH_cticor.fill(singlesLH)\n",
-    "        \n",
-    "    histCalDoublesLH_cticor.fill(d0LH)\n",
-    "    histCalDoublesLH_cticor.fill(d1LH)\n",
-    "    histCalDoublesLH_cticor.fill(d2LH)\n",
-    "    histCalDoublesLH_cticor.fill(d3LH)\n",
-    "    \n",
-    "    histCalTriplesLH_cticor.fill(t0LH)\n",
-    "    histCalTriplesLH_cticor.fill(t1LH)\n",
-    "    histCalTriplesLH_cticor.fill(t2LH)\n",
-    "    histCalTriplesLH_cticor.fill(t3LH)\n",
-    "    \n",
-    "    histCalQuadsLH_cticor.fill(q0LH)\n",
-    "    histCalQuadsLH_cticor.fill(q1LH)\n",
-    "    histCalQuadsLH_cticor.fill(q2LH)\n",
-    "    histCalQuadsLH_cticor.fill(q3LH)\n",
-    "        \n",
-    "    singlesUH, d0UH, d1UH, d2UH, d3UH, t0UH, t1UH, t2UH, t3UH, q0UH, q1UH, q2UH, q3UH = patternSelectorUH.select(dataUH, patternsUH)\n",
-    "        \n",
-    "    histCalSinglesUH_cticor.fill(singlesUH)\n",
-    "    \n",
-    "    histCalDoublesUH_cticor.fill(d0UH)\n",
-    "    histCalDoublesUH_cticor.fill(d1UH)\n",
-    "    histCalDoublesUH_cticor.fill(d2UH)\n",
-    "    histCalDoublesUH_cticor.fill(d3UH)\n",
-    "    \n",
-    "    histCalTriplesUH_cticor.fill(t0UH)\n",
-    "    histCalTriplesUH_cticor.fill(t1UH)\n",
-    "    histCalTriplesUH_cticor.fill(t2UH)\n",
-    "    histCalTriplesUH_cticor.fill(t3UH)\n",
-    "    \n",
-    "    histCalQuadsUH_cticor.fill(q0UH)\n",
-    "    histCalQuadsUH_cticor.fill(q1UH)\n",
-    "    histCalQuadsUH_cticor.fill(q2UH)\n",
-    "    histCalQuadsUH_cticor.fill(q3UH)  \n",
-    "\n",
-    "    print(\"Processed {} images.\".format((i+1)*chunkSize))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Plotting the Results"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "h,e,c,s = histCalCMCor.get()\n",
-    "h0L,e0L,c0L,s0L = histCalPatternCorLH.get()\n",
-    "h0U,e0U,c0U,s0U = histCalPatternCorUH.get()\n",
-    "h0 = h0L + h0U\n",
-    "h1L,e1L,c1L,s1L = histCalCTICorLH.get()\n",
-    "h1U,e1U,c1U,s1U = histCalCTICorUH.get()\n",
-    "h1 = h1L + h1U\n",
-    "hsL,esL,csL,ssL = histCalSinglesLH_cticor.get ()\n",
-    "hsU,esU,csU,ssU = histCalSinglesUH_cticor.get ()\n",
-    "hs = hsL + hsU\n",
-    "\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'CM corr.'},\n",
-    "     {'x': c,\n",
-    "      'y': h0,\n",
-    "      'y_err': np.sqrt(h0[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'green',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Charge sharing corr.'},\n",
-    "     {'x': c,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'red',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Rel. gain/CTI corr.'},\n",
-    "     {'x': c,\n",
-    "      'y': hs,\n",
-    "      'y_err': np.sqrt(hs[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'red',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Single events'},\n",
-    "     ]\n",
-    "     \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy(ADU)', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-center-frame-1col')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false,
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "h1,e1,c1,s1 = histCalSinglesLH_cticor.get()\n",
-    "h,e,c,s = histCalCTICorLH.get()\n",
-    "h2,e2,c2,s2 = histCalDoublesLH_cticor.get ()\n",
-    "h3,e3,c3,s3 = histCalTriplesLH_cticor.get ()\n",
-    "h4,e4,c4,s4 = histCalQuadsLH_cticor.get ()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'all events'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'singles'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'doubles'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'triples'},\n",
-    "     {'x': c4,\n",
-    "      'y': h4,\n",
-    "      'y_err': np.sqrt(h4[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'quads'},\n",
-    "     \n",
-    "    ]\n",
-    " \n",
-    "h=np.ma.getdata(h)\n",
-    "np.savez(out_folder+'LH_all_events_cticor.npz', c,h)\n",
-    "\n",
-    "h1=np.ma.getdata(h1)\n",
-    "np.savez(out_folder+'LH_singles_cticor.npz', c1,h1)\n",
-    "\n",
-    "h2=np.ma.getdata(h2)\n",
-    "np.savez(out_folder+'LH_doubles_cticor.npz', c2,h2) \n",
-    "\n",
-    "h3=np.ma.getdata(h3)\n",
-    "np.savez(out_folder+'LH_triples_cticor.npz', c3,h3)\n",
-    "\n",
-    "h4=np.ma.getdata(h4)\n",
-    "np.savez(out_folder+'LH_quads_cticor.npz', c4,h4) \n",
-    "    \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU) - LH', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')\n",
-    "\n",
-    "h1,e1,c1,s1 = histCalSinglesUH_cticor.get()\n",
-    "h,e,c,s = histCalCTICorUH.get()\n",
-    "h2,e2,c2,s2 = histCalDoublesUH_cticor.get()\n",
-    "h3,e3,c3,s3 = histCalTriplesLH_cticor.get ()\n",
-    "h4,e4,c4,s4 = histCalQuadsLH_cticor.get ()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'all events'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'singles'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'doubles'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'triples'},\n",
-    "     {'x': c4,\n",
-    "      'y': h4,\n",
-    "      'y_err': np.sqrt(h4[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'quads'},\n",
-    "    ]\n",
-    "     \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU) - UH', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')\n",
-    "\n",
-    "h=np.ma.getdata(h)\n",
-    "np.savez(out_folder+'UH_all_events_cticor.npz', c,h)\n",
-    "\n",
-    "h1=np.ma.getdata(h1)\n",
-    "np.savez(out_folder+'UH_singles_cticor.npz', c1,h1)\n",
-    "\n",
-    "h2=np.ma.getdata(h2)\n",
-    "np.savez(out_folder+'UH_doubles_cticor.npz', c2,h2) \n",
-    "\n",
-    "h3=np.ma.getdata(h3)\n",
-    "np.savez(out_folder+'UH_triples_cticor.npz', c3,h3)\n",
-    "\n",
-    "h4=np.ma.getdata(h4)\n",
-    "np.savez(out_folder+'UH_quads_cticor.npz', c4,h4) "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "h,e,c,s = histCalSinglesLH.get()\n",
-    "h1,e1,c1,s1 = histCalSinglesLH_cticor.get()\n",
-    "h2,e2,c2,s2 = histCalSinglesUH.get()\n",
-    "h3,e3,c3,s3 = histCalSinglesUH_cticor.get()\n",
-    "\n",
-    "d = [{'x': c,\n",
-    "      'y': h,\n",
-    "      'y_err': np.sqrt(h[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'ecolor': 'navy',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Signels LH'},\n",
-    "     {'x': c1,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Singles LH corr.'},\n",
-    "     {'x': c2,\n",
-    "      'y': h2,\n",
-    "      'y_err': np.sqrt(h2[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Singles UH'},\n",
-    "     {'x': c3,\n",
-    "      'y': h3,\n",
-    "      'y_err': np.sqrt(h3[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'errorstyle': 'bars',\n",
-    "      'errorcoarsing': 2,\n",
-    "      'label': 'Singles UH corr.'},\n",
-    "     ]\n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU)', \n",
-    "                      y_label='Number of occurrences', figsize='1col',\n",
-    "                      y_log=True, x_range=(0,400),\n",
-    "                      legend='top-right-frame-2col')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "patternStatsLH = patternClassifierLH.getPatternStats()\n",
-    "patternStatsUH = patternClassifierUH.getPatternStats()\n",
-    "\n",
-    "\n",
-    "t0 = PrettyTable()\n",
-    "t0.title = \"Total number of counts after all corrections\"\n",
-    "t0.field_names = [\"Hemisphere\",\"Singles\", \"First-singles\", \"Clusters\"]\n",
-    "t0.add_row([\"LH\", patternStatsLH['singles'], patternStatsLH['first singles'], patternStatsLH['clusters']])\n",
-    "t0.add_row([\"UH\", patternStatsUH['singles'], patternStatsUH['first singles'], patternStatsUH['clusters']])\n",
-    "\n",
-    "print(t0)\n",
-    "\n",
-    "t1 = PrettyTable()\n",
-    "\n",
-    "t1.field_names = [\"Index\",\"D-LH\", \"D-UH\", \"T-LH\", \"T-UH\", \"Q-LH\", \"Q-UH\"]\n",
-    "\n",
-    "t1.add_row([0, patternStatsLH['doubles'][0], patternStatsUH['doubles'][0], patternStatsLH['triples'][0], patternStatsUH['triples'][0], patternStatsLH['quads'][0], patternStatsUH['quads'][0]])\n",
-    "t1.add_row([1, patternStatsLH['doubles'][1], patternStatsUH['doubles'][1], patternStatsLH['triples'][1], patternStatsUH['triples'][1], patternStatsLH['quads'][1], patternStatsUH['quads'][1]])\n",
-    "t1.add_row([2, patternStatsLH['doubles'][2], patternStatsUH['doubles'][2], patternStatsLH['triples'][2], patternStatsUH['triples'][2], patternStatsLH['quads'][2], patternStatsUH['quads'][2]])\n",
-    "t1.add_row([3, patternStatsLH['doubles'][3], patternStatsUH['doubles'][3], patternStatsLH['triples'][3], patternStatsUH['triples'][3], patternStatsLH['quads'][3], patternStatsUH['quads'][3]])\n",
-    "\n",
-    "print(t1)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.6"
-  },
-  "latex_envs": {
-   "LaTeX_envs_menu_present": true,
-   "autocomplete": true,
-   "bibliofile": "biblio.bib",
-   "cite_by": "apalike",
-   "current_citInitial": 1,
-   "eqLabelWithNumbers": true,
-   "eqNumInitial": 1,
-   "hotkeys": {
-    "equation": "Ctrl-E",
-    "itemize": "Ctrl-I"
-   },
-   "labels_anchors": false,
-   "latex_user_defs": false,
-   "report_style_numbering": false,
-   "user_envs_cfg": false
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
diff --git a/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb b/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
index 5452ee3f1..98194d39a 100644
--- a/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
+++ b/notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# FastCCD Data Correction ##\n",
     "\n",
-    "Author: I. Klačková, S. Hauf, Version 1.0\n",
+    "Authors: I. Klačková, S. Hauf, Version 1.0\n",
     "\n",
     "The following notebook provides correction of images acquired with the FastCCD."
    ]
@@ -23,33 +23,33 @@
    },
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/SCS/201930/p900074/raw/\"\n",
-    "out_folder = '/gpfs/exfel/data/scratch/xcal/test/' #output folder\n",
-    "path_template = 'RAW-R{:04d}-DA05-S{{:05d}}.h5'\n",
-    "run = 365\n",
-    "h5path = '/INSTRUMENT/SCS_CDIDET_FCCD2M/DAQ/FCCD:daqOutput/data/image'\n",
-    "h5path_t = '/CONTROL/SCS_CDIDET_FCCD2M/CTRL/LSLAN/inputA/crdg/value'\n",
+    "in_folder = \"/gpfs/exfel/exp/SCS/201930/p900074/raw/\" # input folder, required\n",
+    "out_folder = '/gpfs/exfel/data/scratch/xcal/test/' # output folder, required\n",
+    "path_template = 'RAW-R{:04d}-DA05-S{{:05d}}.h5' #  path template in hdf5 file\n",
+    "run = 339 # run number\n",
+    "h5path = '/INSTRUMENT/SCS_CDIDET_FCCD2M/DAQ/FCCD:daqOutput/data/image' # path in HDF5 file\n",
+    "h5path_t = '/CONTROL/SCS_CDIDET_FCCD2M/CTRL/LSLAN/inputA/crdg/value' # temperature path in HDF5 file\n",
     "h5path_cntrl = '/RUN/SCS_CDIDET_FCCD2M/DET/FCCD'  # path to control data\n",
     "cluster_profile = \"noDB\" #ipcluster profile to use\n",
     "cpuCores = 16 #Specifies the number of running cpu cores\n",
     "operation_mode = \"FF\" #or \"FF\". FS stands for frame-store and FF for full-frame opeartion\n",
     "split_evt_primary_threshold = 7. # primary threshold for split event classification in terms of n sigma noise\n",
-    "split_evt_secondary_threshold = 10. # secondary threshold for split event classification in terms of n sigma noise\n",
+    "split_evt_secondary_threshold = 4. # secondary threshold for split event classification in terms of n sigma noise\n",
     "split_evt_mip_threshold = 1000. # MIP threshold for event classification\n",
-    "cti_Axis_LH = 1 #CTI axis index for lower hemisphere\n",
-    "cti_Axis_UH = 0 #CTI axis indes for upper hemisphere\n",
     "cal_db_interface = \"tcp://max-exfl016:8015#8025\" # calibration DB interface to use\n",
-    "cal_db_timeout = 30000000 # timeout on caldb requests\n",
+    "cal_db_timeout = 300000000 # timeout on caldb requests\n",
     "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "chunk_size_idim = 1 # H5 chunking size of output data\n",
-    "overwrite = True\n",
-    "gain_file = \"/gpfs/exfel/d/cal/caldb_store/xfel/cal/fastccd-type/fastccd1/cal.1541167659.9355488.h5\"\n",
-    "cti_file = \"/gpfs/exfel/d/cal/caldb_store/xfel/cal/fastccd-type/fastccd1/cal.1541167661.4031153.h5\"\n",
-    "do_pattern_classification = False # classify split events\n",
-    "sequences_per_node = 1\n",
-    "limit_images = 0\n",
+    "overwrite = True # overwrite existing files\n",
+    "do_pattern_classification = True # classify split events\n",
+    "sequences_per_node = 1 # sequences to correct per node\n",
+    "limit_images = 0 # limit images per file \n",
     "correct_offset_drift = False  # correct for offset drifts\n",
-    "use_dir_creation_date = True\n",
+    "use_dir_creation_date = True # use dir creation data for calDB queries\n",
+    "time_offset_days = 0 # offset in days for calibration parameters\n",
+    "photon_energy_gain_map = 2. # energy in keV\n",
+    "fix_temperature = 233. # fix temperature to this value, set to 0 to use slow control value\n",
+    "flipped_between = [\"2019-02-01\", \"2019-04-02\"] # detector was flipped during this timespan\n",
     "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node):\n",
     "    import glob\n",
@@ -80,7 +80,7 @@
      "end_time": "2018-12-06T15:54:23.455376Z",
      "start_time": "2018-12-06T15:54:23.413579Z"
     },
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
@@ -116,6 +116,8 @@
     "from iCalibrationDB.detectors import DetectorTypes\n",
     "from cal_tools.tools import get_dir_creation_date\n",
     "\n",
+    "from datetime import timedelta\n",
+    "\n",
     "%matplotlib inline\n",
     "\n",
     "if sequences[0] == -1:\n",
@@ -165,9 +167,10 @@
     "import datetime\n",
     "creation_time = None\n",
     "if use_dir_creation_date:\n",
-    "    creation_time = get_dir_creation_date(in_folder, run)\n",
+    "    creation_time = get_dir_creation_date(in_folder, run) + timedelta(days=time_offset_days)\n",
     "if creation_time:\n",
-    "    print(\"Using {} as creation time\".format(creation_time.isoformat()))    "
+    "    print(\"Using {} as creation time\".format(creation_time.isoformat()))\n",
+    "\n"
    ]
   },
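For reference, the creation-time handling in the hunk above amounts to shifting the directory date by `time_offset_days` before querying the calibration database. A minimal sketch, with a fixed datetime standing in for what `get_dir_creation_date(in_folder, run)` would return:

    from datetime import datetime, timedelta

    dir_creation = datetime(2019, 4, 16, 10, 30)   # stand-in for get_dir_creation_date(in_folder, run)
    time_offset_days = -2                          # e.g. force constants valid two days earlier
    creation_time = dir_creation + timedelta(days=time_offset_days)
    print("Using {} as creation time".format(creation_time.isoformat()))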
   {
@@ -206,6 +209,9 @@
     "    print(\"Detector integration time is set to {}\".format(integration_time))\n",
     "    temperature = np.mean(f[h5path_t])\n",
     "    temperature_k = temperature + 273.15\n",
+    "    if fix_temperature != 0.:\n",
+    "        temperature_k = fix_temperature\n",
+    "        print(\"Using fixed temperature\")\n",
     "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K at beginning of run\".format(temperature, temperature_k))\n",
     "    \n",
     "\n",
@@ -302,156 +308,130 @@
     "offset_temperature = None\n",
     "badPixelMap = None\n",
     "noiseMap = None\n",
-    "for i in range(2):\n",
-    "    try:\n",
-    "        if i == 1:\n",
-    "            det_gain = 0\n",
-    "            temperature_k = 250\n",
-    "        ## offset\n",
-    "        metadata = ConstantMetaData()\n",
-    "        offset = Constants.CCD(DetectorTypes.fastCCD).Offset()\n",
-    "        metadata.calibration_constant = offset\n",
+    "for i, g in enumerate([8, 2, 1]):\n",
+    "    ## offset\n",
+    "    metadata = ConstantMetaData()\n",
+    "    offset = Constants.CCD(DetectorTypes.fastCCD).Offset()\n",
+    "    metadata.calibration_constant = offset\n",
     "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                        integration_time=integration_time,\n",
-    "                                        gain_setting=det_gain,\n",
-    "                                        temperature=temperature_k,\n",
-    "                                        pixels_x=1934,\n",
-    "                                        pixels_y=960)\n",
+    "    # set the operating condition\n",
+    "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
+    "                                    integration_time=integration_time,\n",
+    "                                    gain_setting=g,\n",
+    "                                    temperature=temperature_k,\n",
+    "                                    pixels_x=1934,\n",
+    "                                    pixels_y=960)\n",
     "\n",
-    "        for parm in condition.parameters:\n",
-    "            if parm.name == \"Sensor Temperature\":\n",
-    "                parm.lower_deviation = 5\n",
-    "                parm.upper_deviation = 5\n",
+    "    for parm in condition.parameters:\n",
+    "        if parm.name == \"Sensor Temperature\":\n",
+    "            parm.lower_deviation = 5\n",
+    "            parm.upper_deviation = 5\n",
     "\n",
     "\n",
-    "        device = Detectors.fastCCD1\n",
+    "    device = Detectors.fastCCD1\n",
     "\n",
     "\n",
-    "        metadata.detector_condition = condition\n",
+    "    metadata.detector_condition = condition\n",
     "\n",
     "\n",
     "\n",
-    "        # specify the a version for this constant\n",
-    "        if creation_time is None:\n",
-    "            metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "            metadata.retrieve(cal_db_interface)\n",
-    "        else:\n",
-    "            metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
-    "                                                                      start=creation_time)\n",
-    "            metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)\n",
-    "        \n",
-    "        offsetMap = offset.data\n",
+    "    # specify the a version for this constant\n",
+    "    if creation_time is None:\n",
+    "        metadata.calibration_constant_version = Versions.Now(device=device)\n",
+    "        metadata.retrieve(cal_db_interface)\n",
+    "    else:\n",
+    "        metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
+    "                                                                  start=creation_time)\n",
+    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)\n",
     "\n",
-    "        offset_temperature = None\n",
-    "        for parm in condition.parameters:\n",
     "\n",
-    "            if parm.name == \"Sensor Temperature\":\n",
-    "                offset_temperature = parm.value\n",
+    "    if offsetMap is None:\n",
+    "        offsetMap = np.zeros(list(offset.data.shape)+[3], np.float32)\n",
+    "    offsetMap[...,i] = offset.data\n",
     "\n",
-    "        print(\"Temperature dark images for offset calculation \" +\n",
-    "              \"were taken at: {:0.2f} K\".format(offset_temperature))\n",
+    "    offset_temperature = None\n",
+    "    for parm in condition.parameters:\n",
     "\n",
+    "        if parm.name == \"Sensor Temperature\":\n",
+    "            offset_temperature = parm.value\n",
     "\n",
-    "        ## noise\n",
-    "        metadata = ConstantMetaData()\n",
-    "        noise = Constants.CCD(DetectorTypes.fastCCD).Noise()\n",
-    "        metadata.calibration_constant = noise\n",
+    "    print(\"Temperature dark images for offset calculation \" +\n",
+    "              \"were taken at: {:0.2f} K @ {}\".format(offset_temperature,\n",
+    "                                                     metadata.calibration_constant_version.begin_at))\n",
     "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                        integration_time=integration_time,\n",
-    "                                        gain_setting=det_gain,\n",
-    "                                        temperature=temperature_k,\n",
-    "                                        pixels_x=1934,\n",
-    "                                        pixels_y=960)\n",
+    "    ## noise\n",
+    "    metadata = ConstantMetaData()\n",
+    "    noise = Constants.CCD(DetectorTypes.fastCCD).Noise()\n",
+    "    metadata.calibration_constant = noise\n",
     "\n",
+    "    # set the operating condition\n",
+    "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
+    "                                    integration_time=integration_time,\n",
+    "                                    gain_setting=g,\n",
+    "                                    temperature=temperature_k,\n",
+    "                                    pixels_x=1934,\n",
+    "                                    pixels_y=960)\n",
     "\n",
-    "        for parm in condition.parameters:\n",
-    "            if parm.name == \"Sensor Temperature\":\n",
-    "                parm.lower_deviation = 5\n",
-    "                parm.upper_deviation = 5\n",
     "\n",
+    "    for parm in condition.parameters:\n",
+    "        if parm.name == \"Sensor Temperature\":\n",
+    "            parm.lower_deviation = 5\n",
+    "            parm.upper_deviation = 5\n",
     "\n",
-    "        device = Detectors.fastCCD1\n",
     "\n",
+    "    device = Detectors.fastCCD1\n",
     "\n",
-    "        metadata.detector_condition = condition\n",
     "\n",
-    "        # specify the a version for this constant\n",
-    "        if creation_time is None:\n",
-    "            metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "            metadata.retrieve(cal_db_interface)\n",
-    "        else:\n",
-    "            metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
-    "                                                                      start=creation_time)\n",
-    "            metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)\n",
-    "        \n",
-    "        noiseMap = noise.data\n",
+    "    metadata.detector_condition = condition\n",
     "\n",
-    "        ## bad pixels \n",
+    "    # specify the a version for this constant\n",
+    "    if creation_time is None:\n",
+    "        metadata.calibration_constant_version = Versions.Now(device=device)\n",
+    "        metadata.retrieve(cal_db_interface)\n",
+    "    else:\n",
+    "        metadata.calibration_constant_version = Versions.Timespan(device=device,\n",
+    "                                                                  start=creation_time)\n",
+    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)\n",
     "\n",
-    "        metadata = ConstantMetaData()\n",
-    "        bpix = Constants.CCD(DetectorTypes.fastCCD).BadPixelsDark()\n",
-    "        metadata.calibration_constant = bpix\n",
+    "    if noiseMap is None:\n",
+    "        noiseMap = np.zeros(list(noise.data.shape)+[3], np.float32)\n",
+    "    noiseMap[...,i] = noise.data\n",
     "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
-    "                                        integration_time=integration_time,\n",
-    "                                        gain_setting=det_gain,\n",
-    "                                        temperature=temperature_k,\n",
-    "                                        pixels_x=1934,\n",
-    "                                        pixels_y=960)\n",
     "\n",
-    "        for parm in condition.parameters:\n",
-    "            if parm.name == \"Sensor Temperature\":\n",
-    "                parm.lower_deviation = 5\n",
-    "                parm.upper_deviation = 5\n",
+    "    ## bad pixels \n",
     "\n",
+    "    metadata = ConstantMetaData()\n",
+    "    bpix = Constants.CCD(DetectorTypes.fastCCD).BadPixelsDark()\n",
+    "    metadata.calibration_constant = bpix\n",
     "\n",
-    "        device = Detectors.fastCCD1\n",
+    "    # set the operating condition\n",
+    "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n",
+    "                                    integration_time=integration_time,\n",
+    "                                    gain_setting=g,\n",
+    "                                    temperature=temperature_k,\n",
+    "                                    pixels_x=1934,\n",
+    "                                    pixels_y=960)\n",
     "\n",
+    "    for parm in condition.parameters:\n",
+    "        if parm.name == \"Sensor Temperature\":\n",
+    "            parm.lower_deviation = 5\n",
+    "            parm.upper_deviation = 5\n",
     "\n",
-    "        metadata.detector_condition = condition\n",
     "\n",
-    "        # specify the a version for this constant\n",
-    "        metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "        #metadata.retrieve(cal_db_interface, timeout=cal_db_timeout)\n",
+    "    device = Detectors.fastCCD1\n",
     "\n",
-    "        badPixelMap = np.zeros_like(offsetMap).astype(np.uint32) #bpix.data\n",
-    "        break\n",
-    "    except Exception as e:\n",
-    "        print(e)\n",
-    "        pass\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T15:54:28.269179Z",
-     "start_time": "2018-12-06T15:54:28.256945Z"
-    },
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "td1 = temperature_k - 247.82\n",
-    "td2 = offset_temperature - 247.82\n",
-    "def fit_fun(x, m, b):\n",
-    "    return m*(x-b)**2\n",
-    "\n",
-    "c1 = fit_fun(offset_temperature, *offset_correction_args)\n",
-    "c2 = fit_fun(temperature_k, *offset_correction_args)\n",
-    "do = c2-c1\n",
-    "if td1 < -5 or td2 < -5:\n",
-    "    print(\"Temperature if 5 degrees colder than previously measured, do another offset drift analysis!\")\n",
-    "    print(\"Setting drift correction to 0\")\n",
-    "    do = 0\n",
-    "else:\n",
-    "    print(\"Correcting offset by c={:0.2f}\".format(do))"
+    "\n",
+    "    metadata.detector_condition = condition\n",
+    "\n",
+    "    # specify the a version for this constant\n",
+    "    metadata.calibration_constant_version = Versions.Now(device=device)\n",
+    "    metadata.retrieve(cal_db_interface, timeout=cal_db_timeout)\n",
+    "\n",
+    "    #badPixelMap = np.zeros_like(offsetMap).astype(np.uint32) #bpix.data\n",
+    "    if badPixelMap is None:\n",
+    "        badPixelMap = np.zeros(list(bpix.data.shape)+[3], np.uint32)\n",
+    "    badPixelMap[...,i] = bpix.data\n",
+    "        \n"
    ]
   },
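The loop above retrieves offset, noise and bad-pixel constants once per gain setting (8, 2, 1) and stacks them along a trailing axis of length 3. A minimal numpy sketch of the stacking pattern; `fetch_offset_for_gain` is a hypothetical stand-in for the ConstantMetaData retrieval:

    import numpy as np

    def fetch_offset_for_gain(g):
        # hypothetical stand-in for the calibration-DB retrieval shown above
        return np.full((1934, 960), 100.0 / g, dtype=np.float32)

    offset_map = None
    for i, g in enumerate([8, 2, 1]):
        data = fetch_offset_for_gain(g)
        if offset_map is None:
            # trailing axis of length 3: one slot per gain setting
            offset_map = np.zeros(list(data.shape) + [3], np.float32)
        offset_map[..., i] = data

    print(offset_map.shape)   # (1934, 960, 3)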
   {
@@ -474,59 +454,101 @@
    "outputs": [],
    "source": [
     "## relative gain\n",
-    "if gain_file == \"\":\n",
-    "    metadata = ConstantMetaData()\n",
-    "    relgain = Constants.CCD(DetectorTypes.fastCCD).RelativeGain()\n",
-    "    metadata.calibration_constant = relgain\n",
     "\n",
-    "    # set the operating condition\n",
-    "    condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
-    "                                           photon_energy=5.9,\n",
-    "                                           integration_time=integration_time,\n",
-    "                                           gain_setting=det_gain)\n",
-    "    device = Detectors.fastCCD1\n",
+    "metadata = ConstantMetaData()\n",
+    "relgain = Constants.CCD(DetectorTypes.fastCCD).RelativeGain()\n",
+    "metadata.calibration_constant = relgain\n",
     "\n",
+    "# set the operating condition\n",
+    "condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
+    "                                       integration_time=integration_time,\n",
+    "                                       gain_setting=0,\n",
+    "                                       temperature=temperature_k,\n",
+    "                                       pixels_x=1934,\n",
+    "                                       pixels_y=960, photon_energy=photon_energy_gain_map)\n",
+    "device = Detectors.fastCCD1\n",
     "\n",
-    "    metadata.detector_condition = condition\n",
     "\n",
-    "    # specify the a version for this constant\n",
-    "    metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "    metadata.retrieve(cal_db_interface)\n",
+    "metadata.detector_condition = condition\n",
     "\n",
-    "    relGainLH = np.squeeze(relgain.data[0,...])\n",
-    "    relGainUH = np.squeeze(relgain.data[-1,...])\n",
-    "else:\n",
-    "    with h5py.File(gain_file, \"r\") as f:\n",
-    "        relGainLH = np.squeeze(f[\"FastCCD1/RelativeGainCCD/0/data\"][0,...])\n",
-    "        relGainUH = np.squeeze(f[\"FastCCD1/RelativeGainCCD/0/data\"][-1,...])\n",
-    "        \n",
-    "if cti_file == \"\":\n",
-    "    ## CTI gain\n",
+    "# specify the a version for this constant\n",
+    "metadata.calibration_constant_version = Versions.Now(device=device)\n",
+    "metadata.retrieve(cal_db_interface)\n",
     "\n",
-    "    metadata = ConstantMetaData()\n",
-    "    cti = Constants.CCD(DetectorTypes.fastCCD).CTI()\n",
-    "    metadata.calibration_constant = cti\n",
-    "\n",
-    "    # set the operating condition\n",
-    "    condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n",
-    "                                           photon_energy=5.9,\n",
-    "                                           integration_time=integration_time,\n",
-    "                                           gain_setting=det_gain)\n",
-    "    device = Detectors.fastCCD1\n",
-    "\n",
-    "\n",
-    "    metadata.detector_condition = condition\n",
-    "\n",
-    "    # specify the a version for this constant\n",
-    "    metadata.calibration_constant_version = Versions.Now(device=device)\n",
-    "    metadata.retrieve(cal_db_interface)\n",
-    "\n",
-    "    ctiLH = np.squeeze(cti.data[0,...])\n",
-    "    ctiUH = np.squeeze(cti.data[-1,...])\n",
-    "else:\n",
-    "    with h5py.File(cti_file, \"r\") as f:\n",
-    "        ctiLH = np.squeeze(f[\"FastCCD1/CTICCD/0/data\"][0,...])\n",
-    "        ctiUH = np.squeeze(f[\"FastCCD1/CTICCD/0/data\"][-1,...])"
+    "relGain = relgain.data[::-1,...]\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "relGainCA = copy.copy(relGain)\n",
+    "relGainC = relGainCA[:relGainCA.shape[0]//2,...]\n",
+    "ctiA = np.ones(relGainCA.shape[:2])\n",
+    "cti = np.ones(relGainC.shape[:2])\n",
+    "i = 0\n",
+    "idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
+    "mn1 = np.nanmean(relGainC[i, ~idx, 0])\n",
+    "\n",
+    "for i in range(1, relGainC.shape[0]):\n",
+    "    idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
+    "    mn2 = np.nanmean(relGainC[i, ~idx, 0])\n",
+    "    cti[i,:] = mn2/mn1\n",
+    "ctiA[:relGainCA.shape[0]//2,...] = cti\n",
+    "\n",
+    "relGainC = relGainCA[relGainCA.shape[0]//2:,...]\n",
+    "\n",
+    "\n",
+    "cti = np.ones(relGainC.shape[:2])\n",
+    "i = -1\n",
+    "idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
+    "mn1 = np.nanmean(relGainC[i, ~idx, 0])\n",
+    "\n",
+    "for i in range(relGainC.shape[0]-1, 1, -1):\n",
+    "    idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n",
+    "    mn2 = np.nanmean(relGainC[i, ~idx, 0])\n",
+    "    cti[i,:] = mn2/mn1\n",
+    "\n",
+    "ctiA[relGainCA.shape[0]//2:,...] = cti\n",
+    "\n",
+    "relGainCA = copy.copy(relGain)\n",
+    "relGainC = relGainCA[:relGainCA.shape[0]//2,...]\n",
+    "for i in range(relGainC.shape[1]):\n",
+    "    idx = (relGainC[:,i, 0] < 0.95) | (relGainC[:,i,0] > 1.05)\n",
+    "    relGainC[idx,i,0] = np.nanmean(relGainC[~idx,i,0])\n",
+    "    relGainC[idx,i,1] = np.nanmean(relGainC[~idx,i,1])\n",
+    "    relGainC[idx,i,2] = np.nanmean(relGainC[~idx,i,2])\n",
+    "relGainCA[:relGainCA.shape[0]//2,...] = relGainC\n",
+    "relGainC = relGainCA[relGainCA.shape[0]//2:,...]\n",
+    "for i in range(relGainC.shape[1]):\n",
+    "    idx = (relGainC[:,i, 0] < 0.95) | (relGainC[:,i,0] > 1.05)\n",
+    "    relGainC[idx,i,0] = np.nanmean(relGainC[~idx,i,0])\n",
+    "    relGainC[idx,i,1] = np.nanmean(relGainC[~idx,i,1])\n",
+    "    relGainC[idx,i,2] = np.nanmean(relGainC[~idx,i,2])\n",
+    "relGainCA[relGainCA.shape[0]//2:,...] = relGainC\n",
+    "relGainC = relGainCA*ctiA[...,None]\n",
+    "\n",
+    "relGain = relGainC"
+   ]
+  },
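The new cell above derives an effective CTI correction per hemisphere as the ratio of each row's outlier-masked mean relative gain to that of a reference row (the first row in one hemisphere, the last in the other), and then smooths outlier pixels column-wise. A stripped-down sketch of the row-ratio step on synthetic data:

    import numpy as np

    rng = np.random.default_rng(1)
    rel_gain = 1.0 + 0.01 * rng.standard_normal((967, 960))   # synthetic high-gain plane, one hemisphere

    def masked_row_mean(row, lo=0.9, hi=1.1):
        # ignore pixels far from unity, as the cell above does
        bad = (row < lo) | (row > hi)
        return np.nanmean(row[~bad])

    reference = masked_row_mean(rel_gain[0])       # reference row for this hemisphere
    cti = np.ones(rel_gain.shape)
    for i in range(1, rel_gain.shape[0]):
        cti[i, :] = masked_row_mean(rel_gain[i]) / reference

    print(cti[:3, 0])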
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "import dateutil.parser\n",
+    "flipped_between = [dateutil.parser.parse(d) for d in flipped_between]\n",
+    "flip_rgain = creation_time >= flipped_between[0] and creation_time <= flipped_between[1]\n",
+    "flip_rgain &= (metadata.calibration_constant_version.begin_at.replace(tzinfo=None) >= flipped_between[0] \n",
+    "               and metadata.calibration_constant_version.begin_at.replace(tzinfo=None) <= flipped_between[1])\n",
+    "print(\"Accounting for flipped detector: {}\".format(flip_rgain))\n"
    ]
   },
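The gain map is only applied in its stored orientation if both the run and the retrieved constant fall inside the `flipped_between` window; otherwise the map is reversed along the flipped axis later via `fstride`. A minimal sketch of the date-window test (the real cell additionally strips time-zone info from `begin_at` with `.replace(tzinfo=None)`); the two timestamps below are made up:

    import dateutil.parser
    from datetime import datetime

    flipped_between = [dateutil.parser.parse(d) for d in ["2019-02-01", "2019-04-02"]]

    creation_time = datetime(2019, 3, 15)      # made-up run creation time
    constant_begin_at = datetime(2019, 3, 1)   # made-up begin_at of the retrieved gain constant

    flip_rgain = flipped_between[0] <= creation_time <= flipped_between[1]
    flip_rgain &= flipped_between[0] <= constant_begin_at <= flipped_between[1]
    print("Accounting for flipped detector: {}".format(flip_rgain))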
   {
@@ -543,18 +565,13 @@
    "source": [
     "#************************Calculators************************#\n",
     "\n",
-    "offsetCorrection = xcal.OffsetCorrection([x, y], \n",
-    "                                          offsetMap, \n",
-    "                                          nCells = memoryCells, \n",
-    "                                          cores=cpuCores, gains=None,\n",
-    "                                          blockSize=blockSize)\n",
     "\n",
     "cmCorrection = xcal.CommonModeCorrection([x, y], \n",
     "                                         commonModeBlockSize, \n",
     "                                         commonModeAxisR,\n",
     "                                         nCells = memoryCells, \n",
     "                                         noiseMap = noiseMap,\n",
-    "                                        runParallel=run_parallel,\n",
+    "                                        runParallel=True,\n",
     "                                        stats=True)\n",
     "\n",
     "patternClassifierLH = xcal.PatternClassifier([x//2, y], \n",
@@ -566,8 +583,8 @@
     "                                              nCells=memoryCells, \n",
     "                                              cores=cpuCores, \n",
     "                                              allowElongated = False,\n",
-    "                                          blockSize=blockSize,\n",
-    "                                            runParallel=run_parallel)\n",
+    "                                              blockSize=[x//2, y],\n",
+    "                                              runParallel=True)\n",
     "\n",
     "\n",
     "\n",
@@ -580,54 +597,12 @@
     "                                              nCells=memoryCells, \n",
     "                                              cores=cpuCores, \n",
     "                                              allowElongated = False,\n",
-    "                                          blockSize=blockSize,\n",
-    "                                            runParallel=run_parallel)\n",
-    "\n",
+    "                                              blockSize=[x//2, y],\n",
+    "                                              runParallel=True)\n",
     "\n",
-    "\n",
-    "ctiCorrectionLH = xcal.CTICorrection([x//2, y], \n",
-    "                                   ctiLH, axis = cti_Axis_LH, cores=cpuCores,\n",
-    "                                   nCells = memoryCells, gains=None,\n",
-    "                                  blockSize=blockSize, runParallel=run_parallel) \n",
-    "                                   \n",
-    "relGainCorrLH = xcal.RelativeGainCorrection([x//2, y], \n",
-    "                                          relGainLH, axis = cti_Axis_LH, \n",
-    "                                          cores=cpuCores,\n",
-    "                                          nCells = memoryCells, gains=None,\n",
-    "                                         blockSize=blockSize, runParallel=run_parallel)\n",
-    "\n",
-    "ctiCorrectionUH = xcal.CTICorrection([x//2, y], \n",
-    "                                   ctiUH, axis = cti_Axis_UH, cores=cpuCores,\n",
-    "                                   nCells = memoryCells, gains=None,\n",
-    "                                  blockSize=blockSize, runParallel=run_parallel) \n",
     "                                   \n",
-    "relGainCorrUH = xcal.RelativeGainCorrection([x//2, y], \n",
-    "                                          relGainUH, axis = cti_Axis_UH, \n",
-    "                                          cores=cpuCores,\n",
-    "                                          nCells = memoryCells, gains=None,\n",
-    "                                         blockSize=blockSize, runParallel=run_parallel)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:08:51.637538Z",
-     "start_time": "2018-12-06T16:08:51.626971Z"
-    },
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "#*********Setting bad pixel mask***************#\n",
-    "\n",
-    "patternClassifierLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "patternClassifierUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "ctiCorrectionLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "ctiCorrectionUH.setBadPixelMask(badPixelMap[x//2:, :])\n",
-    "relGainCorrLH.setBadPixelMask(badPixelMap[:x//2, :])\n",
-    "relGainCorrUH.setBadPixelMask(badPixelMap[x//2:, :])"
+    "\n",
+    "\n"
    ]
   },
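The pattern classifiers above take the noise map plus the primary and secondary thresholds in units of sigma. The actual pattern taxonomy (singles, doubles, triples, quads) lives inside `xcal.PatternClassifier`; the toy below only illustrates what the two thresholds mean, with made-up numbers:

    import numpy as np

    rng = np.random.default_rng(2)
    noise = np.full((6, 6), 5.0)                 # per-pixel noise
    frame = rng.normal(0.0, 5.0, size=(6, 6))    # mostly dark frame
    frame[2, 2] = 60.0                           # a photon hit ...
    frame[2, 3] = 25.0                           # ... sharing charge with a neighbour

    primary = 7.0    # split_evt_primary_threshold, in sigma
    secondary = 4.0  # split_evt_secondary_threshold, in sigma

    seeds = frame > primary * noise              # candidate event centres
    members = frame > secondary * noise          # pixels allowed to join a split event

    for r, c in zip(*np.where(seeds)):
        neigh = members[max(r - 1, 0):r + 2, max(c - 1, 0):c + 2].sum() - 1
        print("seed at ({}, {}) with {} attached neighbour(s)".format(r, c, neigh))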
   {
@@ -645,40 +620,25 @@
     "#*****************Histogram Calculators******************#\n",
     "\n",
     "histCalOffsetCor = xcal.HistogramCalculator([x, y], \n",
-    "                                             bins=1050, \n",
+    "                                             bins=500, \n",
     "                                             range=[-50, 1000],\n",
     "                                             nCells=memoryCells, \n",
     "                                             cores=cpuCores,\n",
     "                                             blockSize=blockSize)\n",
     "\n",
-    "histCalCTICor = xcal.HistogramCalculator([x, y], \n",
-    "                                         bins=1050, \n",
+    "histCalPcorr = xcal.HistogramCalculator([x, y], \n",
+    "                                         bins=500, \n",
     "                                         range=[-50, 1000],\n",
     "                                         nCells=memoryCells, \n",
     "                                         cores=cpuCores,\n",
     "                                         blockSize=blockSize)\n",
     "\n",
-    "\n",
-    "histCalCTICorWide = xcal.HistogramCalculator([x, y], \n",
-    "                                         bins=2000, \n",
-    "                                         range=[0, 20000],\n",
-    "                                         nCells=memoryCells, \n",
-    "                                         cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalCTICorWideMed = xcal.HistogramCalculator([x, y], \n",
+    "histCalPcorrS = xcal.HistogramCalculator([x, y], \n",
     "                                         bins=500, \n",
-    "                                         range=[0, 20000],\n",
-    "                                         nCells=memoryCells, \n",
-    "                                         cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n",
-    "\n",
-    "histCalCTICorWideLow = xcal.HistogramCalculator([x, y], \n",
-    "                                         bins=100, \n",
-    "                                         range=[0, 20000],\n",
+    "                                         range=[-50, 1000],\n",
     "                                         nCells=memoryCells, \n",
     "                                         cores=cpuCores,\n",
-    "                                         blockSize=blockSize)\n"
+    "                                         blockSize=blockSize)"
    ]
   },
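The histogram calculators accumulate counts over repeated `fill()` calls and return bin contents, edges and centres from `get()`. Functionally, each is close to the following plain-numpy accumulation over the same binning (500 bins over -50..1000 ADU); the data here is synthetic:

    import numpy as np

    bins, lo, hi = 500, -50, 1000
    edges = np.linspace(lo, hi, bins + 1)
    centres = 0.5 * (edges[:-1] + edges[1:])
    counts = np.zeros(bins)

    rng = np.random.default_rng(3)
    for _ in range(3):                                   # one iteration per processed chunk
        chunk = rng.normal(20.0, 30.0, size=(1934, 960)).astype(np.float32)
        counts += np.histogram(chunk, bins=edges)[0]

    print(centres[:3], counts[:3])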
   {
@@ -700,8 +660,10 @@
    },
    "outputs": [],
    "source": [
-    "patternClassifierLH._imagesPerChunk = 50\n",
-    "patternClassifierUH._imagesPerChunk = 50"
+    "patternClassifierLH._imagesPerChunk = 500\n",
+    "patternClassifierUH._imagesPerChunk = 500\n",
+    "patternClassifierLH.debug()\n",
+    "patternClassifierUH.debug()"
    ]
   },
   {
@@ -717,13 +679,7 @@
    "outputs": [],
    "source": [
     "histCalOffsetCor.debug()\n",
-    "offsetCorrection.debug()\n",
-    "relGainCorrLH.debug()\n",
-    "relGainCorrUH.debug()\n",
-    "histCalCTICor.debug()\n",
-    "histCalCTICorWide.debug()\n",
-    "histCalCTICorWideMed.debug()\n",
-    "histCalCTICorWideLow.debug()"
+    "histCalPcorr.debug()\n"
    ]
   },
   {
@@ -739,9 +695,7 @@
    "outputs": [],
    "source": [
     "def copy_and_sanitize_non_cal_data(infile, outfile, h5base):\n",
-    "    \"\"\" Copy and sanitize data in `infile` that is not touched by `correctLPD`\n",
-    "    \"\"\"\n",
-    "   \n",
+    "    \n",
     "    if h5base.startswith(\"/\"):\n",
     "        h5base = h5base[1:]\n",
     "    dont_copy = ['pixels']\n",
@@ -756,26 +710,10 @@
     "                group = str(k).split(\"/\")\n",
     "                group = \"/\".join(group[:-1])\n",
     "                infile.copy(k, outfile[group])\n",
-    "        \n",
+    "                \n",
     "    infile.visititems(visitor)"
    ]
   },
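`copy_and_sanitize_non_cal_data` walks the input file with `visititems` and copies every dataset outside the pixel data into the output file, so the corrected file keeps train IDs and other bookkeeping. A self-contained sketch with a throwaway file and made-up paths:

    import h5py
    import numpy as np

    # build a tiny input file to copy from
    with h5py.File("demo_raw.h5", "w") as f:
        f["INSTRUMENT/DET/data/image/pixels"] = np.zeros((2, 4, 4), np.uint16)
        f["INSTRUMENT/DET/data/image/trainId"] = np.arange(2, dtype=np.uint64)

    h5base = "INSTRUMENT/DET/data/image"
    dont_copy = [h5base + "/pixels"]

    with h5py.File("demo_raw.h5", "r") as infile, h5py.File("demo_corr.h5", "w") as outfile:
        def visitor(k, item):
            if k not in dont_copy and isinstance(item, h5py.Dataset):
                parent = "/".join(k.split("/")[:-1])
                outfile.require_group(parent)
                infile.copy(k, outfile[parent])
        infile.visititems(visitor)
        print(list(outfile[h5base].keys()))   # ['trainId'] -- 'pixels' is left for corrected data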
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2018-12-06T16:08:53.829967Z",
-     "start_time": "2018-12-06T16:08:53.825645Z"
-    },
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "relGainLH[relGainLH< 0.5] = 1\n",
-    "relGainUH[relGainUH< 0.5] = 1"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -790,15 +728,25 @@
    "source": [
     "mean_im = None\n",
     "single_im = None\n",
+    "mean_im_cc = None\n",
+    "single_im_cc = None\n",
     "drift_lh = []\n",
     "drift_uh = []\n",
+    "offsetMap = np.squeeze(offsetMap)\n",
+    "noiseMap = np.squeeze(noiseMap)\n",
+    "badPixelMap = np.squeeze(badPixelMap)\n",
+    "relGain = np.squeeze(relGain)\n",
     "for k, f in enumerate(file_list):\n",
-    "    with h5py.File(f, 'r') as infile:\n",
-    "        out_file = \"{}/{}\".format(out_folder, f.split(\"/\")[-1])\n",
-    "        out_file = out_file.replace(\"RAW\", \"CORR\")\n",
-    "        #print(out_file)\n",
-    "        with h5py.File(out_file, \"w\") as ofile:\n",
-    "            try:\n",
+    "    with h5py.File(f, 'r', driver='core') as infile:\n",
+    "        out_fileb = \"{}/{}\".format(out_folder, f.split(\"/\")[-1])\n",
+    "        out_file = out_fileb.replace(\"RAW\", \"CORR\")\n",
+    "        #out_filed = out_fileb.replace(\"RAW\", \"CORR-SC\")\n",
+    "\n",
+    "        data = None\n",
+    "        noise = None\n",
+    "        try:\n",
+    "            with h5py.File(out_file, \"w\") as ofile:\n",
+    "            \n",
     "                copy_and_sanitize_non_cal_data(infile, ofile, h5path)\n",
     "                data = infile[h5path+\"/pixels\"][()]\n",
     "                nzidx = np.count_nonzero(data, axis=(1,2))\n",
@@ -811,13 +759,37 @@
     "                                             oshape,\n",
     "                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
     "                                             dtype=np.float32)\n",
-    "\n",
     "                \n",
-    "                dg1 = (data > 2**12) & (data < 2**15)\n",
-    "                dg2 = (data >= 2**15)\n",
-    "                data = np.bitwise_and(data, 0b0011111111111111).astype(np.float32)\n",
-    "                data = offsetCorrection.correct(data.astype(np.float32)) #correct for the offset\n",
+    "                ddsetm = ofile.create_dataset(h5path+\"/mask\",\n",
+    "                                             oshape,\n",
+    "                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
+    "                                             dtype=np.uint32, compression=\"gzip\")\n",
+    "                \n",
+    "                ddsetg = ofile.create_dataset(h5path+\"/gain\",\n",
+    "                                             oshape,\n",
+    "                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
+    "                                             dtype=np.uint8, compression=\"gzip\")\n",
+    "                \n",
+    "                gain = np.right_shift(data, 14)\n",
+    "                \n",
+    "                gain[gain != 0] -= 1\n",
+    "                \n",
+    "                fstride = 1\n",
+    "                if not flip_rgain:  # rgain was taken during flipped orientation\n",
+    "                    fstride = -1\n",
     "                \n",
+    "                data = np.bitwise_and(data, 0b0011111111111111).astype(np.float32)                \n",
+    "                omap = np.repeat(offsetMap[...,None,:], data.shape[2], axis=2)\n",
+    "                rmap = np.repeat(relGain[:,::fstride,None,:], data.shape[2], axis=2)\n",
+    "                nmap = np.repeat(noiseMap[...,None,:], data.shape[2], axis=2)\n",
+    "                bmap = np.repeat(badPixelMap[...,None,:], data.shape[2], axis=2)\n",
+    "                offset = np.choose(gain, (omap[...,0], omap[...,1], omap[...,2]))\n",
+    "                rg = np.choose(gain, (rmap[...,0], rmap[...,1], rmap[...,2]))\n",
+    "                noise = np.choose(gain, (nmap[...,0], nmap[...,1], nmap[...,2]))\n",
+    "                bpix = np.choose(gain, (bmap[...,0], bmap[...,1], bmap[...,2]))\n",
+    "                \n",
+    "                data -= offset\n",
+    "                data *= rg\n",
     "                \n",
     "                if correct_offset_drift:\n",
     "                    lhd = np.mean(data[x//2-10:x//2,y//2-5:y//2+5,:], axis=(0,1))\n",
@@ -830,57 +802,68 @@
     "                \n",
     "                histCalOffsetCor.fill(data)\n",
     "\n",
-    "                #data = cmCorrection.correct(data) #correct for the row common mode        \n",
+    "               \n",
+    "                ddset[...] = np.moveaxis(data, 2, 0)\n",
+    "                ddsetm[...] = np.moveaxis(bpix, 2, 0)\n",
+    "                ddsetg[...] = np.moveaxis(gain, 2, 0).astype(np.uint8)\n",
     "                \n",
-    "                dataLH = data[:x//2, :, :]\n",
-    "                dataUH = data[x//2:, :, :]\n",
+    "                if mean_im is None:\n",
+    "                        mean_im = np.nanmean(data, axis=2)\n",
+    "                        single_im = data[...,0]\n",
+    "                        \n",
+    "                if do_pattern_classification:\n",
+    "                #with h5py.File(out_file, \"a\") as ofiled:\n",
+    "                    #copy_and_sanitize_non_cal_data(infile, ofiled, h5path)\n",
+    "\n",
+    "                    ddsetcm = ofile.create_dataset(h5path+\"/pixels_cm\",\n",
+    "                                         oshape,\n",
+    "                                         chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
+    "                                         dtype=np.float32)\n",
+    "\n",
+    "                    ddsetc = ofile.create_dataset(h5path+\"/pixels_classified\",\n",
+    "                                         oshape,\n",
+    "                                         chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
+    "                                         dtype=np.float32, compression=\"gzip\")\n",
+    "\n",
+    "                    ddsetp = ofile.create_dataset(h5path+\"/patterns\",\n",
+    "                                                 oshape,\n",
+    "                                                 chunks=(chunk_size_idim, oshape[1], oshape[2]),\n",
+    "                                                 dtype=np.int32, compression=\"gzip\")\n",
+    "                    \n",
     "\n",
-    "                #*************CTI/Rel.gain Correction*************#\n",
-    "                \n",
-    "                #dataLH /= relGainLH[None,:,None]\n",
-    "                #dataUH /= relGainUH[None,:,None]\n",
-    "                \n",
+    "                    patternClassifierLH._noisemap = noise[:x//2, :, :]\n",
+    "                    patternClassifierUH._noisemap = noise[x//2:, :, :]\n",
     "\n",
-    "                #dataLH = ctiCorrectionLH.correct(dataLH)\n",
-    "                #dataLH = relGainCorrLH.correct(dataLH)\n",
+    "                    data = cmCorrection.correct(data) #correct for the row common mode\n",
+    "                    ddsetcm[...] = np.moveaxis(data, 2, 0)\n",
+    "\n",
+    "                    dataLH = data[:x//2, :, :]\n",
+    "                    dataUH = data[x//2:, :, :]\n",
     "\n",
-    "                #dataUH = ctiCorrectionUH.correct(dataUH)\n",
-    "                #dataUH = relGainCorrUH.correct(dataUH)\n",
-    "                \n",
-    "                #*************Pattern Classification*************#\n",
-    "                if do_pattern_classification:\n",
     "                    dataLH, patternsLH = patternClassifierLH.classify(dataLH)\n",
     "                    dataUH, patternsUH = patternClassifierUH.classify(dataUH)\n",
     "\n",
-    "                \n",
-    "                \n",
-    "                data[:x//2, :, :] = dataLH\n",
-    "                data[x//2:, :, :] = dataUH\n",
-    "                \n",
-    "                data[dg1 != 0] *= 3.\n",
-    "                data[dg2 != 0] *= 8.\n",
-    "                \n",
-    "                histCalCTICor.fill(data)\n",
-    "                dh = copy.copy(data)\n",
-    "                dh[(dg1 != 0) | (dg2 != 0)] = np.nan\n",
-    "                histCalCTICorWide.fill(dh)\n",
-    "                dh = copy.copy(data)\n",
-    "                dh[~(dg1 != 0)] = np.nan\n",
-    "                histCalCTICorWideMed.fill(dh)\n",
-    "                dh = copy.copy(data)\n",
-    "                dh[~(dg2 != 0)] = np.nan\n",
-    "                if np.count_nonzero(dg2 != 0):\n",
-    "                    histCalCTICorWideLow.fill(data[dg2 != 0])\n",
-    "            \n",
-    "                \n",
-    "                ddset[...] = np.moveaxis(data, 2, 0)\n",
-    "                \n",
-    "                if mean_im is None:\n",
-    "                    mean_im = np.nanmedian(data, axis=2)\n",
-    "                    single_im = data[...,0]\n",
+    "                    data[:x//2, :, :] = dataLH\n",
+    "                    data[x//2:, :, :] = dataUH\n",
+    "\n",
+    "                    patterns = np.zeros(data.shape, patternsLH.dtype)\n",
+    "                    patterns[:x//2, :, :] = patternsLH\n",
+    "                    patterns[x//2:, :, :] = patternsUH\n",
+    "\n",
+    "                    data[data < split_evt_primary_threshold*noise] = 0\n",
+    "                    ddsetc[...] = np.moveaxis(data, 2, 0)\n",
+    "                    ddsetp[...] = np.moveaxis(patterns, 2, 0)\n",
+    "\n",
+    "                    histCalPcorr.fill(data)\n",
+    "                    data[patterns != 100] = np.nan\n",
+    "                    histCalPcorrS.fill(data)\n",
+    "\n",
+    "                    if mean_im_cc is None:\n",
+    "                        mean_im_cc = np.nanmean(data, axis=2)\n",
+    "                        single_im_cc = data[...,0]\n",
     "               \n",
-    "            except Exception as e:\n",
-    "                print(\"Couldn't calibrate data in {}: {}\".format(f, e))\n"
+    "        except Exception as e:\n",
+    "            print(\"Couldn't calibrate data in {}: {}\".format(f, e))\n"
    ]
   },
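The core of the rewritten correction loop above: the two most significant bits of each 16-bit word carry the gain code, the lower 14 bits the signal; the gain code is remapped onto three planes, and `np.choose` then picks the matching offset, noise, relative gain and bad-pixel value per pixel (the notebook first repeats the maps along the frame axis). A minimal 2-D sketch with synthetic data and made-up constants:

    import numpy as np

    rng = np.random.default_rng(0)
    raw = rng.integers(0, 2**14, size=(8, 8), dtype=np.uint16)
    raw |= (rng.integers(0, 4, size=(8, 8), dtype=np.uint16) << 14)   # plant gain bits 14-15

    # decode the gain code and map it onto three constant planes (0: high, 1: medium, 2: low)
    gain = np.right_shift(raw, 14).astype(np.int64)
    gain[gain != 0] -= 1          # codes {0, 1} -> 0, 2 -> 1, 3 -> 2

    signal = np.bitwise_and(raw, 0b0011111111111111).astype(np.float32)

    # hypothetical per-gain constants with a trailing gain axis, like the stacked maps above
    offset_map = np.stack([np.full((8, 8), v, np.float32) for v in (100., 80., 60.)], axis=-1)
    rel_gain_map = np.stack([np.full((8, 8), v, np.float32) for v in (1., 3., 8.)], axis=-1)

    offset = np.choose(gain, (offset_map[..., 0], offset_map[..., 1], offset_map[..., 2]))
    rel_gain = np.choose(gain, (rel_gain_map[..., 0], rel_gain_map[..., 1], rel_gain_map[..., 2]))

    corrected = (signal - offset) * rel_gain
    print(corrected.shape, corrected.dtype)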
   {
@@ -1122,7 +1105,6 @@
    "outputs": [],
    "source": [
     "ho,eo,co,so = histCalOffsetCor.get()\n",
-    "h1,e1L,c1L,s1L = histCalCTICor.get()\n",
     "\n",
     "\n",
     "d = [{'x': co,\n",
@@ -1151,43 +1133,33 @@
      "end_time": "2018-12-06T16:12:57.289742Z",
      "start_time": "2018-12-06T16:12:45.529734Z"
     },
-    "collapsed": false
+    "collapsed": true
    },
    "outputs": [],
    "source": [
-    "\n",
-    "h1,e1L,c1L,s1L = histCalCTICorWide.get()\n",
-    "h2,e1L,c2L,s1L = histCalCTICorWideMed.get()\n",
-    "h3,e1L,c3L,s1L = histCalCTICorWideLow.get()\n",
-    "\n",
-    "\n",
-    "d = [\n",
-    "     {'x': c1L,\n",
-    "      'y': h1,\n",
-    "      'y_err': np.sqrt(h1[:]),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'label': 'High gain'},]\n",
-    "if h2 is not None and c2L is not None:\n",
-    "    d += [\n",
-    "    {'x': c2L,\n",
-    "      'y': h2/(c1L.size/c2L.size),\n",
-    "      'y_err': np.sqrt(h2[:]/(c1L.size/c2L.size)),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'label': 'Medium gain'}, ]\n",
-    "if h3 is not None and c3L is not None: \n",
-    "    d += [\n",
-    "    {'x': c3L,\n",
-    "      'y': h3/(c1L.size/c3L.size),\n",
-    "      'y_err': np.sqrt(h3[:]/(c1L.size/c3L.size)),\n",
-    "      'drawstyle': 'steps-mid',\n",
-    "      'label': 'Low gain'}, \n",
-    "     ]\n",
-    "     \n",
-    "\n",
-    "fig = xana.simplePlot(d, aspect=1, x_label='Energy(ADU)', \n",
-    "                      y_label='Number of occurrences', figsize='2col',\n",
-    "                      y_log=True, x_range=(1,20000),x_log=True,\n",
-    "                      legend='top-center-frame-2col')"
+    "if do_pattern_classification:\n",
+    "    h1,e1L,c1L,s1L = histCalPcorr.get()\n",
+    "    h1s,e1Ls,c1Ls,s1Ls = histCalPcorrS.get()\n",
+    "\n",
+    "\n",
+    "    d = [\n",
+    "         {'x': c1L,\n",
+    "          'y': h1,\n",
+    "          'y_err': np.sqrt(h1[:]),\n",
+    "          'drawstyle': 'steps-mid',\n",
+    "          'label': 'Split event corrected'},\n",
+    "         {'x': c1Ls,\n",
+    "          'y': h1s,\n",
+    "          'y_err': np.sqrt(h1s[:]),\n",
+    "          'drawstyle': 'steps-mid',\n",
+    "          'label': 'Single pixel hits'}\n",
+    "    ]\n",
+    "\n",
+    "\n",
+    "    fig = xana.simplePlot(d, aspect=1, x_label='Energy(ADU)', \n",
+    "                          y_label='Number of occurrences', figsize='2col',\n",
+    "                          y_log=True, x_range=(0,200),x_log=False,\n",
+    "                          legend='top-center-frame-2col')"
    ]
   },
   {
@@ -1205,7 +1177,7 @@
      "end_time": "2018-12-06T16:11:08.317130Z",
      "start_time": "2018-12-06T16:11:05.788655Z"
     },
-    "collapsed": true
+    "collapsed": false
    },
    "outputs": [],
    "source": [
@@ -1213,7 +1185,14 @@
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Signal (ADU)',\n",
     "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmin=-50, vmax=4000)"
+    "                       y_range=(0,x), vmin=-50, vmax=500)\n",
+    "\n",
+    "if do_pattern_classification:\n",
+    "    fig = xana.heatmapPlot(mean_im_cc,\n",
+    "                       x_label='Columns', y_label='Rows',\n",
+    "                       lut_label='Signal (ADU)',\n",
+    "                       x_range=(0,y),\n",
+    "                       y_range=(0,x), vmin=-50, vmax=500)"
    ]
   },
   {
@@ -1241,27 +1220,16 @@
     "                       x_label='Columns', y_label='Rows',\n",
     "                       lut_label='Signal (ADU)',\n",
     "                       x_range=(0,y),\n",
-    "                       y_range=(0,x), vmin=-50, vmax=1000)"
+    "                       y_range=(0,x), vmin=-50, vmax=500)\n",
+    "\n",
+    "if do_pattern_classification:\n",
+    "    fig = xana.heatmapPlot(single_im_cc,\n",
+    "                       x_label='Columns', y_label='Rows',\n",
+    "                       lut_label='Signal (ADU)',\n",
+    "                       x_range=(0,y),\n",
+    "                       y_range=(0,x), vmin=-50, vmax=500)"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
-  },
   {
    "cell_type": "code",
    "execution_count": null,
diff --git a/xfel_calibrate/notebooks.py b/xfel_calibrate/notebooks.py
index 2b63c3448..c2a398185 100644
--- a/xfel_calibrate/notebooks.py
+++ b/xfel_calibrate/notebooks.py
@@ -119,12 +119,6 @@ notebooks = {
                                                "use function": "balance_sequences",
                                                "cluster cores": 4},
                                 },
-                        "FF": {
-                                "notebook": "notebooks/FastCCD/Characterize_FlatFields_FastCCD_NBC.ipynb",
-                                "concurrency": {"parameter": None,
-                                                "default concurrency": None,
-                                                "cluster cores": 16},
-                                },
                        },
              "JUNGFRAU": {
                         "DARK": {
-- 
GitLab