diff --git a/cal_tools/cal_tools/agipdlib.py b/cal_tools/cal_tools/agipdlib.py
index 7509813830d60edd8e646fac6634ae04b09c02e8..ee6367de2abe4c64348d91d8fd9025ece334f6f8 100644
--- a/cal_tools/cal_tools/agipdlib.py
+++ b/cal_tools/cal_tools/agipdlib.py
@@ -624,7 +624,7 @@ class AgipdCorrections:
         # slopeFF = slopeFFpix/average(slopeFFpix)
         # To apply them we have to / not *
         if self.corr_bools.get("xray_corr"):
-            data /= self.xray_cor[module_idx]
+            data /= self.xray_cor[module_idx][cellid, ...]
 
         # use sharedmem raw_data and t0_rgain
         # after calculating it while offset correcting.
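
The two `xray_cor` changes in this file (indexing by `cellid` here, and the `constant_shape[1:]` buffer in `allocate_constants` further down) make the flat-field correction memory-cell resolved. A minimal numpy sketch of the indexing, assuming a buffer shape of (mem_cells, 128, 512) as the transpose in `init_constants` suggests:

```python
import numpy as np

mem_cells, nx, ny = 352, 128, 512                            # assumed AGIPD module geometry
xray_cor = np.ones((mem_cells, nx, ny), dtype=np.float32)    # per-cell slope maps

data = np.random.rand(1000, nx, ny).astype(np.float32)       # images of one chunk
cellid = np.random.randint(0, mem_cells, size=1000)          # memory cell of each image

# Each image is divided by the slope map of its own memory cell,
# mirroring `data /= self.xray_cor[module_idx][cellid, ...]` above.
data /= xray_cor[cellid, ...]
```
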
@@ -818,7 +818,7 @@ class AgipdCorrections:
             uq, fidxv, cntsv = np.unique(trains, return_index=True,
                                          return_counts=True)
 
-            # Validate calculated CORR INDEX contents by checking 
+            # Validate calculated CORR INDEX contents by checking
             # difference between trainId stored in RAW data and trains from the image data
             train_diff = np.isin(np.array(infile["/INDEX/trainId"]), uq,
                                  invert=True)
@@ -905,12 +905,12 @@ class AgipdCorrections:
         exists of the current AGIPD instances.
 
         Relative gain is derived from pulse capacitor as well as low
-        intensity flat field data, information from flat field data is 
-        needed to 'calibrate' pulse capacitor data, if there is no 
+        intensity flat field data; information from flat field data is
+        needed to 'calibrate' pulse capacitor data. If there is no
         available FF data, relative gain for High Gain stage is set to 1:
 
-        * Relative gain for High gain stage - from the FF data we get 
-          the relative slopes of a given pixel and memory cells with 
+        * Relative gain for High gain stage - from the FF data we get
+          the relative slopes of a given pixel and memory cells with
           respect to all memory cells and all pixels in the module,
           Please note: current slopesFF available in calibration
           constants are created per pixel only, not per memory cell:
@@ -923,9 +923,9 @@ class AgipdCorrections:
           between high and medium gain using slope information from
           fits to the linear part of high and medium gain:
 
-             rfpc_high_medium = m_h/m_m          
+             rfpc_high_medium = m_h/m_m
 
-          where m_h and m_m is the medium gain slope of given memory cells 
+          where m_m is the medium gain slope of given memory cells
           and pixel, and m_h is the high gain slope as above
            rel_gain_medium = rel_high_gain * rfpc_high_medium
 
@@ -954,32 +954,16 @@ class AgipdCorrections:
 
         if self.corr_bools.get("xray_corr"):
             bpixels |= cons_data["BadPixelsFF"].astype(np.uint32)[..., :bpixels.shape[2], None]  # noqa
-            slopesFF = np.squeeze(cons_data["SlopesFF"])
-
+            slopesFF = cons_data["SlopesFF"]
             if len(slopesFF.shape) == 4:
                 slopesFF = slopesFF[..., 0]
-
-            # first 32 cells are known to behave differently so if we can avoid
-            # them
-            # when calculating the mean X-ray derived gain slope for each pixel
-            if slopesFF.shape[2] > 32:
-                xray_cor = np.nanmedian(
-                    slopesFF[..., 32:min(96, self.max_cells)], axis=2)
-            elif slopesFF.shape[2] > 2:
-                xray_cor = np.nanmedian(
-                    slopesFF[..., :min(96, self.max_cells)], axis=2)
-            else:
-                xray_cor = np.squeeze(slopesFF[..., 0])
+            # Memory-cell-resolved xray_cor correction
+            xray_cor = slopesFF  # (128, 512, mem_cells)
 
             # relative X-ray correction is normalized by the median
             # of all pixels
 
-            # TODO: A check is required to know why it is again divided by 
-            # median. If we have relative slopes in the constants
-            # and (we have!) 
-            # xray cor = (slopeFF/avarege_slopeFF)/avarege_slopeFF.
-            # It didn't not make sense and was removed. 
-            # xray_cor /= np.nanmedian(xray_cor)
+            xray_cor /= np.nanmedian(xray_cor)
 
             self.xray_cor[module_idx][...] = xray_cor.transpose()[...]
 
@@ -1041,13 +1025,19 @@ class AgipdCorrections:
             # ratio between HG and MG per pixel per mem cell, used
             # for rel gain calculation
             frac_high_med_pix = pc_high_m / pc_med_m
-            # avarage ration between HG and MG as a function of 
+            # average ratio between HG and MG as a function of
             # mem cell (needed for bls_stripes)
             # TODO: Per pixel would be more optimal correction
             frac_high_med = pc_high_med / pc_med_med
             # calculate additional medium-gain offset
             md_additional_offset = pc_high_l - pc_med_l * pc_high_m / pc_med_m
 
+            # Calculate relative gain. If FF constants are available,
+            # use them for high gain;
+            # if not, rel_gain is calculated using PC data only:
+            # if self.corr_bools.get("xray_corr"):
+            #     rel_gain[..., :self.max_cells, 0] /= xray_corr
+
             # PC data should be 'calibrated' with X-ray data;
             # if that is not done, it is better to use 1 instead of biasing
             # the results with PC artefacts.
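
A worked sketch of the additional medium-gain offset computed above, assuming the `pc_*_m` values are PC fit slopes and the `pc_*_l` values the corresponding intercepts (hypothetical numbers):

```python
# Hypothetical PC fit results for one pixel/cell: signal = m * x + l
pc_high_m, pc_high_l = 110.0, 8000.0   # high-gain slope / intercept
pc_med_m, pc_med_l = 3.4, 9000.0       # medium-gain slope / intercept

# Converting a medium-gain reading S to high-gain-equivalent ADU:
#   S * (m_h / m_m) + (l_h - l_m * m_h / m_m)
# The constant term is the additional medium-gain offset from the diff:
md_additional_offset = pc_high_l - pc_med_l * pc_high_m / pc_med_m
```
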
@@ -1078,17 +1068,35 @@ class AgipdCorrections:
         dname = device.device_name
         cons_data = dict()
         when = dict()
+
         for cname, mdata in const_yaml[dname].items():
             when[cname] = mdata["creation-time"]
             if when[cname]:
-                cf = h5py.File(mdata["file-path"], "r")
-                cons_data[cname] = np.copy(cf[f"{dname}/{cname}/0/data"])
-                cf.close()
+                # This path is only used when testing new flat fields from
+                # file during development: it takes ages to test using all
+                # cells. Consequently, the shape needs to be fixed when
+                # fewer cells are used.
+                with h5py.File(mdata["file-path"], "r") as cf:
+                    cons_data[cname] = np.copy(cf[f"{dname}/{cname}/0/data"])
+                    shape = cons_data[cname].shape  # (128, 512, mem_cells)
+                    extra_dims = shape[:2] + (self.max_cells - shape[2],)
+
+                    if extra_dims[-1] != 0 and cname == "BadPixelsFF":
+                        extra_temp = np.zeros(extra_dims, dtype=np.int32)
+                        cons_data[cname] = np.concatenate(
+                            (cons_data[cname], extra_temp), axis=2)
+                        print('Extra memory cells were appended to the '
+                              'BadPixelsFF constant')
+
+                    if extra_dims[-1] != 0 and cname == "SlopesFF":
+                        extra_temp = np.ones(extra_dims, dtype=np.float32)
+                        cons_data[cname] = np.concatenate(
+                            (cons_data[cname], extra_temp), axis=2)
+                        print('Extra memory cells were appended to the '
+                              'SlopesFF constant')
             else:
                 # Create empty constant using the list elements
-                cons_data[cname] = \
-                    getattr(np, mdata["file-path"][0])(mdata["file-path"][1])
-
+                cons_data[cname] = getattr(np, mdata["file-path"][0])(mdata["file-path"][1])  # noqa
         self.init_constants(cons_data, module_idx)
 
         return when
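
A standalone sketch of the cell-padding logic added above, with hypothetical shapes: a file-based constant written with fewer memory cells than the run uses gets neutral values appended (1 for slopes, i.e. no correction; 0 for bad-pixel masks, i.e. no bits set):

```python
import numpy as np

max_cells = 352                                          # cells used by the run
slopes_ff = np.ones((128, 512, 2), dtype=np.float32)     # constant with only 2 cells

extra_dims = slopes_ff.shape[:2] + (max_cells - slopes_ff.shape[2],)
if extra_dims[-1] != 0:
    # Pad the cell axis with neutral slope values
    slopes_ff = np.concatenate(
        (slopes_ff, np.ones(extra_dims, dtype=np.float32)), axis=2)

assert slopes_ff.shape == (128, 512, max_cells)
```
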
@@ -1191,7 +1199,7 @@ class AgipdCorrections:
                                                              dtype='f4')
 
             self.mask[module_idx] = sharedmem.empty(constant_shape, dtype='i4')
-            self.xray_cor[module_idx] = sharedmem.empty(constant_shape[2:],
+            self.xray_cor[module_idx] = sharedmem.empty(constant_shape[1:],
                                                         dtype='f4')
 
     def allocate_images(self, shape, n_cores_files):
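
For reference, a sketch of the per-device metadata layout that `initialize_from_yaml` consumes from `retrieved_constants.yml`; device name and paths here are invented, and the local-file branch added to the precorrection notebook below writes the `slopesff_bpmask_module_{qm}.h5` form:

```python
# Hypothetical structure of one device entry, as read by initialize_from_yaml():
const_yaml = {
    "AGIPD1M1/DET/0CH0": {                 # device name (hypothetical)
        "SlopesFF": {
            # Local-file branch of the precorrection notebook:
            "file-path": "/path/to/slopesff_bpmask_module_Q1M1.h5",
            "creation-time": "00:00:00",
        },
        "Offset": {
            # No database match: [numpy constructor, shape] fallback used by
            # getattr(np, mdata["file-path"][0])(mdata["file-path"][1])
            "file-path": ["zeros", (128, 512, 202, 3)],
            "creation-time": None,
        },
    },
}
```
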
diff --git a/cal_tools/cal_tools/agipdutils.py b/cal_tools/cal_tools/agipdutils.py
index c6a7ae4a6c97cb9eccf98281ccdbb2fd502f9e25..e217c04dafbf19c53fcb0b73bec0cd23a6bed91c 100644
--- a/cal_tools/cal_tools/agipdutils.py
+++ b/cal_tools/cal_tools/agipdutils.py
@@ -43,7 +43,7 @@ def assemble_constant_dict(corr_bools, pc_bools, memory_cells, bias_voltage,
     const_dict = {
         "Offset": ["zeros", (128, 512, memory_cells, 3), darkcond],
         "Noise": ["zeros", (128, 512, memory_cells, 3), darkcond],
-        "ThresholdsDark": ["ones", (128, 512, memory_cells, 2), darkcond],
+        "ThresholdsDark": ["ones", (128, 512, memory_cells, 5), darkcond],
         "BadPixelsDark": ["zeros", (128, 512, memory_cells, 3), darkcond],
     }
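
The last axis of the empty `ThresholdsDark` placeholder grows from 2 to 5 entries here. As a sketch of how such a `[constructor, shape, condition]` triple is materialized into an array (mirroring the fallback in `initialize_from_yaml` above), with 202 memory cells as an assumed example:

```python
import numpy as np

entry = ["ones", (128, 512, 202, 5), None]  # constructor name, shape, condition
thresholds_dark = getattr(np, entry[0])(entry[1])
assert thresholds_dark.shape == (128, 512, 202, 5)
```
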
 
diff --git a/cal_tools/cal_tools/agipdutils_ff.py b/cal_tools/cal_tools/agipdutils_ff.py
new file mode 100644
index 0000000000000000000000000000000000000000..65ff9186690cdd07a532641d7276164dac2ebeb2
--- /dev/null
+++ b/cal_tools/cal_tools/agipdutils_ff.py
@@ -0,0 +1,242 @@
+from typing import Any, Dict, List, Tuple
+
+from iminuit import Minuit
+import numpy as np
+
+from cal_tools.enums import BadPixelsFF
+
+
+def any_in(mask: np.ndarray, bits: int) -> np.ndarray:
+    """Return an elementwise boolean array: True where any of the given
+    bits is set in the mask."""
+    return mask.astype(np.uint) & bits > 0
+
+
+def gaussian(x: np.ndarray, norm: float = 1, mean: float = 0, sigma: float = 1) -> np.ndarray:  # noqa
+    """
+    Return value of Gaussian function
+
+    :param x: Argument (float or 1D array) of Gaussian function
+    :param norm: Normalization of Gaussian function
+    :param mean: Mean parameter
+    :param sigma: Sigma parameter
+    :return: Value of the Gaussian function.
+    """
+    return norm * np.exp(-1 / 2 * ((x - mean) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))  # noqa
+
+
+def gaussian_sum(x: np.ndarray, ng: int = 4, *p: float) -> np.ndarray:
+    """Sum of ng Gaussian functions
+
+    :param x: Argument (float or 1D array) of the function
+    :param ng: Number of Gaussian functions
+    :param p: List of parameters (norm1,mean1,sigma1,norm2,mean2,sigma2,...)
+    """
+    r = 0.
+    for i in range(ng):
+        r += gaussian(x, *p[i * 3:(i + 1) * 3])
+    return r
+
+
+def get_statistical_parameters(x: np.ndarray,
+                               y: np.ndarray,
+                               x_range: List) -> Tuple[np.uint64, np.float64, np.float64, np.ndarray]:  # noqa
+    """Return statistical parameters of selected part of a histogram.
+
+    :param x: Center of bins of the histogram
+    :param y: Value of bins of the histogram
+    :param x_range: x range to be considered
+    :return: Sum of histogram, Mean value, Standard deviation,
+             List of selected bins
+    """
+    # TODO: Check if wq.median works better than mean
+    sel = (x >= x_range[0]) & (x < x_range[1])
+    h_sum = np.sum(y[sel])
+    h_norm = y[sel] / h_sum
+    h_mean = np.sum(h_norm * x[sel])
+    h_sqr = (x[sel] - h_mean) ** 2
+    h_std = np.sqrt(np.sum(h_norm * h_sqr))
+
+    return h_sum, h_mean, h_std, sel
+
+
+def get_starting_parameters(xe: np.ndarray,
+                            ye: np.ndarray,
+                            limits: np.ndarray,
+                            n_peaks: int = 3,
+                            f_lim: int = 2) -> Tuple[Dict[str, Any], List[Tuple]]:  # noqa
+    """
+    Estimate starting parameters for Gaussian fit of several peaks.
+
+    :param xe: Center of bins of the histogram
+    :param ye: Value of bins of the histogram
+    :param limits: Position of each peak ((left1, right1),
+    (left2, right2), ...) to be considered.
+    :param n_peaks: Number of peaks
+    :param f_lim: Limits in units of standard deviation to consider
+    """
+    parameters = {}
+    shapes = []
+    for peak in range(n_peaks):
+        n, m, rms, idx = get_statistical_parameters(xe, ye, limits[peak])
+        limits2 = [m - f_lim * rms, m + f_lim * rms]
+        n, m, rms, idx = get_statistical_parameters(xe, ye, limits2)
+        shapes.append((n, m, rms, idx))
+
+        parameters.update({f'g{peak}sigma': rms,
+                           f'g{peak}n': float(n),
+                           f'g{peak}mean': m})
+    return parameters, shapes
+
+
+def fit_n_peaks(x: np.ndarray,
+                y: np.ndarray,
+                pars: Dict[str, Any],
+                x_range: Tuple[float, float],
+                do_minos: bool = False,
+                n_peaks: int = 4,
+                fix_d01: bool = True) -> Minuit:
+    """
+    Fit histogram with n-Gaussian function.
+
+    :param x: Center of bins of the histogram
+    :param y: Value of bins of the histogram
+    :param pars: Dictionary of initial parameters for fitting
+    :param x_range: x Range to be considered for the fitting
+    :param do_minos: Run Minos if True
+    :param n_peaks: Number of Gaussian peaks to fit (min 2, max 4)
+    :param fix_d01: Fix positions of the higher photon peaks to multiples of
+    the distance between the noise and first photon peak.
+    :return: minuit object
+    """
+    sel = (x >= x_range[0]) & (x < x_range[1])
+
+    # Square of bin errors
+    yrr2 = np.copy(y[sel])
+    yrr2[yrr2 == 0] = 1  # bins with zero events have error=1
+
+    if fix_d01:
+        pars['fix_g2mean'] = True
+        pars['fix_g3mean'] = True
+
+    if n_peaks < 4:
+        pars['g3n'] = 0
+        pars['fix_g3n'] = True
+        pars['g3sigma'] = 1
+        pars['fix_g3sigma'] = True
+        pars['fix_g3mean'] = True
+
+    if n_peaks < 3:
+        pars['g2n'] = 0
+        pars['fix_g2n'] = True
+        pars['g2sigma'] = 1
+        pars['fix_g2sigma'] = True
+        pars['fix_g2mean'] = True
+
+    def chi2_f(g0n, g0mean, g0sigma,
+               g1n, g1mean, g1sigma,
+               g2n, g2mean, g2sigma,
+               g3n, g3mean, g3sigma):
+
+        d01 = (g1mean - g0mean)
+
+        if 'fix_g2mean' in pars and pars['fix_g2mean']:
+            g2mean = g0mean + d01 * 2
+
+        if 'fix_g3mean' in pars and pars['fix_g3mean']:
+            g3mean = g0mean + d01 * 3
+
+        if g3n == 0:
+            n_peaks = 3
+        elif g2n == 0:
+            n_peaks = 2
+        else:
+            n_peaks = 4
+
+        yt = gaussian_sum(x[sel], n_peaks,
+                          g0n, g0mean, g0sigma,
+                          g1n, g1mean, g1sigma,
+                          g2n, g2mean, g2sigma,
+                          g3n, g3mean, g3sigma)
+        return np.nansum((yt - y[sel]) ** 2 / yrr2)
+
+    minuit = Minuit(chi2_f, **pars, pedantic=False)
+    minuit.migrad()
+
+    if do_minos:
+        if minuit.get_fmin().is_valid:
+            minuit.minos()
+
+    return minuit
+
+
+def set_par_limits(pars: Dict[str, Any],
+                   peak_range: np.ndarray,
+                   peak_norm_range: np.ndarray,
+                   peak_width_range: np.ndarray,
+                   n_peaks: int = 4):
+    """
+    Set limits on initial fit parameters based on given values
+
+    :param pars: Dictionary of initial fit parameters
+    :param peak_range: Limits on peak positions
+    :param peak_norm_range: Limits on normalization of Gaussian peaks
+    :param peak_width_range: Limits on width of Gaussian peaks
+    :param n_peaks: Number of Gaussian peaks
+    """
+    for peak in range(n_peaks):
+        pars.update({f'limit_g{peak}n': peak_norm_range[peak],
+                     f'limit_g{peak}mean': peak_range[peak],
+                     f'limit_g{peak}sigma': peak_width_range[peak],
+                     })
+
+
+def get_mask(fit_summary: Dict[str, Any],
+             peak_lim: List,
+             d0_lim: List,
+             chi2_lim: List,
+             peak_width_lim: np.ndarray) -> int:
+    """
+    Calculate Bad pixels mask based on fit results and given limits
+
+    :param fit_summary: Dictionary of the fit output from Multi-Gaussian fit
+    :param peak_lim: Limits on noise peak position
+    :param d0_lim: Limits on distance between noise and first photon peak
+    :param chi2_lim: Limits on reduced chi^2 value
+    :param peak_width_lim: Limits on noise peak width
+    :return: Bad pixel mask
+    """
+    if not fit_summary['is_valid']:
+        return BadPixelsFF.FIT_FAILED.value
+
+    m0 = fit_summary['g0mean']
+    s0 = fit_summary['g0sigma']
+    s1 = fit_summary['g1sigma']
+    s2 = fit_summary['g2sigma']
+    chi2_ndof = fit_summary['chi2_ndof']
+    d01 = fit_summary['g1mean'] - m0
+
+    mask = 0
+
+    if not fit_summary['has_accurate_covar']:
+        mask |= BadPixelsFF.ACCURATE_COVAR.value
+
+    if not peak_lim[0] <= m0 <= peak_lim[1]:
+        mask |= BadPixelsFF.NOISE_PEAK_THRESHOLD.value
+
+    if not d0_lim[0] <= d01 <= d0_lim[1]:
+        mask |= BadPixelsFF.GAIN_THRESHOLD.value
+
+    if not chi2_lim[0] <= chi2_ndof <= chi2_lim[1]:
+        mask |= BadPixelsFF.CHI2_THRESHOLD.value
+
+    width_lim = peak_width_lim[0] * s0
+    inside_s1 = width_lim[0] <= s1 <= width_lim[1]
+
+    width_lim = peak_width_lim[1] * s0
+    inside_s2 = width_lim[0] <= s2 <= width_lim[1]
+
+    if not (inside_s1 and inside_s2):
+        mask |= BadPixelsFF.PEAK_WIDTH_THRESHOLD.value
+
+    return mask
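
The new `agipdutils_ff` helpers are designed to be chained per pixel/cell histogram. A minimal usage sketch under the following assumptions: `x`/`y` are bin centers and counts of one histogram, `fit_range` holds the evaluated ADU window, the `*_range`/`*_lim` arrays are shaped as in the flat-field notebook below ((4, 2) and (2, 2)), and iminuit's 1.x interface (`get_fmin()`, `.values`, `.parameters`, `.fval`) is available:

```python
import numpy as np

from cal_tools.agipdutils_ff import (
    fit_n_peaks, get_mask, get_starting_parameters, set_par_limits)

pars, shapes = get_starting_parameters(x, y, peak_range, n_peaks=4)
set_par_limits(pars, peak_range, peak_norm_range, peak_width_range)
minuit = fit_n_peaks(x, y, pars, fit_range, do_minos=False, n_peaks=4)

# Assemble the fit summary expected by get_mask()
fmin = minuit.get_fmin()
fit_summary = {name: minuit.values[name] for name in minuit.parameters}
sel = (x >= fit_range[0]) & (x < fit_range[1])
ndof = np.sum(sel) - len(minuit.parameters)
fit_summary.update({'is_valid': fmin.is_valid,
                    'has_accurate_covar': fmin.has_accurate_covar,
                    'chi2_ndof': minuit.fval / ndof})

mask = get_mask(fit_summary, peak_lim, d0_lim, chi2_lim, peak_width_lim)
```
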
diff --git a/cal_tools/cal_tools/enums.py b/cal_tools/cal_tools/enums.py
index 513b25cb80f2827614987a27eab91ab1c9826d59..a516dee2f55dbd8ece632c00f523d58a80b2f578 100644
--- a/cal_tools/cal_tools/enums.py
+++ b/cal_tools/cal_tools/enums.py
@@ -28,6 +28,21 @@ class BadPixels(Enum):
     NON_LIN_RESPONSE_REGION  = 0b100000000000000000000 # bit 21
     
     
+class BadPixelsFF(Enum):
+    """ The SlopesFF bad pixel encoding
+    """
+
+    FIT_FAILED               = 0b000000000000000000001 # bit 1
+    CHI2_THRESHOLD           = 0b000000000000000000010 # bit 2
+    NOISE_PEAK_THRESHOLD     = 0b000000000000000000100 # bit 3
+    GAIN_THRESHOLD           = 0b000000000000000001000 # bit 4
+    PEAK_WIDTH_THRESHOLD     = 0b000000000000000010000 # bit 5
+    ACCURATE_COVAR           = 0b000000000000000100000 # bit 6
+    BAD_DARK                 = 0b000000000000001000000 # bit 7
+    NO_ENTRY                 = 0b000000000000010000000 # bit 8
+    GAIN_DEVIATION           = 0b000000000000100000000 # bit 9
+
+
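
A short sketch of how these bit flags combine in a mask array, together with `any_in` from the new `agipdutils_ff` module:

```python
import numpy as np

from cal_tools.agipdutils_ff import any_in
from cal_tools.enums import BadPixelsFF

mask = np.zeros((128, 512), dtype=np.uint32)
mask[0, 0] = BadPixelsFF.FIT_FAILED.value | BadPixelsFF.CHI2_THRESHOLD.value

# Elementwise test: True wherever any of the requested bits is set
bad = any_in(mask, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.NO_ENTRY.value)
assert bool(bad[0, 0]) and not bool(bad[0, 1])
```
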
 class SnowResolution(Enum):
     """ An Enum specifying how to resolve snowy pixels
     """
diff --git a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
index 2990da965082f29c46a4233c9febd3f9880755d8..09b22f1b248153b378723956e7df32ae2987cec4 100644
--- a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
+++ b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
@@ -38,6 +38,8 @@
    "karabo_id_control = \"HED_EXP_AGIPD500K2G\" # karabo-id for control device\n",
    "karabo_da_control = 'AGIPD500K2G00' # karabo DA for control information\n",
    "\n",
+    "slopes_ff_from_files = \"\" # Path to locally stored SlopesFF and BadPixelsFF constants\n",
+    "\n",
    "use_dir_creation_date = True # use the creation date of the input dir for database queries\n",
    "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
    "cal_db_timeout = 30000 # in milliseconds\n",
diff --git a/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb b/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
index ff59e54189eaec5d49481e77698b83dcaca5495f..412355eae416e93bea055396b9f5d0b480c1303f 100644
--- a/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
+++ b/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
@@ -40,6 +40,7 @@
     "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
     "creation_date_offset = \"00:00:00\" # add an offset to creation date, e.g. to get different constants\n",
     "\n",
+    "slopes_ff_from_files = \"\" # Path to locally stored SlopesFF and BadPixelsFF constants\n",
     "calfile =  \"\" # path to calibration file. Leave empty if all data should come from DB\n",
     "nodb = False # if set only file-based constants will be used\n",
     "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
@@ -93,17 +94,13 @@
     "import sys\n",
     "from collections import OrderedDict\n",
     "\n",
-    "# make sure a cluster is running with ipcluster start --n=32, give it a while to start\n",
     "import os\n",
     "import h5py\n",
     "import numpy as np\n",
     "import matplotlib\n",
     "matplotlib.use(\"agg\")\n",
     "import matplotlib.pyplot as plt\n",
-    "from ipyparallel import Client\n",
-    "print(f\"Connecting to profile {cluster_profile}\")\n",
-    "view = Client(profile=cluster_profile)[:]\n",
-    "view.use_dill()\n",
+    "import multiprocessing as mp\n",
     "\n",
     "from iCalibrationDB import Constants, Conditions, Detectors\n",
     "from cal_tools.tools import (map_modules_from_folder, get_dir_creation_date)\n",
@@ -236,6 +233,7 @@
     "from functools import partial\n",
     "import yaml\n",
     "\n",
+    "\n",
     "def retrieve_constants(karabo_id, bias_voltage, max_cells, acq_rate, \n",
     "                       gain_setting, photon_energy, only_dark, nodb_with_dark, \n",
     "                       cal_db_interface, creation_time, \n",
@@ -294,7 +292,7 @@
    "        acq_rate = get_acq_rate((f, karabo_id, idx))\n",
    "\n",
    "    # avoid retrieving constants, if requested.\n",
-    "    if not nodb_with_dark:  \n",
+    "    if not nodb_with_dark:\n",
     "        const_dict = assemble_constant_dict(corr_bools, pc_bools, max_cells, bias_voltage,\n",
     "                                            gain_setting, acq_rate, photon_energy,\n",
     "                                            beam_energy=None, only_dark=only_dark)\n",
@@ -303,26 +301,32 @@
     "        # to return a dict of useful metadata.\n",
     "        mdata_dict = dict()\n",
     "        for cname, cval in const_dict.items():\n",
-    "            try:\n",
-    "                condition = getattr(Conditions, cval[2][0]).AGIPD(**cval[2][1])\n",
-    "                co, mdata = \\\n",
-    "                    get_from_db(dev, getattr(Constants.AGIPD, cname)(),\n",
-    "                                condition, getattr(np, cval[0])(cval[1]),\n",
-    "                                cal_db_interface, creation_time, meta_only=True, verbosity=0)\n",
-    "                mdata_const = mdata.calibration_constant_version\n",
-    "                # saving metadata in a dict\n",
-    "                mdata_dict[cname] = dict()\n",
-    "                # check if constant was sucessfully retrieved.\n",
-    "                if mdata.comm_db_success:                       \n",
-    "                    mdata_dict[cname][\"file-path\"] = f\"{mdata_const.hdf5path}\" \\\n",
-    "                                                     f\"{mdata_const.filename}\"\n",
-    "                    mdata_dict[cname][\"creation-time\"] = f\"{mdata_const.begin_at}\"\n",
-    "                else:\n",
-    "                    mdata_dict[cname][\"file-path\"] = const_dict[cname][:2]\n",
-    "                    mdata_dict[cname][\"creation-time\"] = None\n",
-    "            except Exception as e:\n",
-    "                err = f\"Error: {e}, Traceback: {traceback.format_exc()}\"\n",
-    "                print(err)\n",
+    "            # saving metadata in a dict\n",
+    "            mdata_dict[cname] = dict()\n",
+    "            \n",
+    "            if slopes_ff_from_files and cname in [\"SlopesFF\", \"BadPixelsFF\"]:\n",
+    "                mdata_dict[cname][\"file-path\"] = f\"{slopes_ff_from_files}/slopesff_bpmask_module_{qm}.h5\"\n",
+    "                mdata_dict[cname][\"creation-time\"] = \"00:00:00\"\n",
+    "            else:\n",
+    "                try:\n",
+    "                    condition = getattr(Conditions, cval[2][0]).AGIPD(**cval[2][1])\n",
+    "                    co, mdata = \\\n",
+    "                        get_from_db(dev, getattr(Constants.AGIPD, cname)(),\n",
+    "                                    condition, getattr(np, cval[0])(cval[1]),\n",
+    "                                    cal_db_interface, creation_time, meta_only=True, verbosity=0)\n",
+    "                    mdata_const = mdata.calibration_constant_version\n",
+    "                    \n",
+    "                    # check if constant was sucessfully retrieved.\n",
+    "                    if mdata.comm_db_success:                       \n",
+    "                        mdata_dict[cname][\"file-path\"] = f\"{mdata_const.hdf5path}\" \\\n",
+    "                                                         f\"{mdata_const.filename}\"\n",
+    "                        mdata_dict[cname][\"creation-time\"] = f\"{mdata_const.begin_at}\"\n",
+    "                    else:\n",
+    "                        mdata_dict[cname][\"file-path\"] = const_dict[cname][:2]\n",
+    "                        mdata_dict[cname][\"creation-time\"] = None\n",
+    "                except Exception as e:\n",
+    "                    err = f\"Error: {e}, Traceback: {traceback.format_exc()}\"\n",
+    "                    print(err)\n",
     "\n",
     "    return qm, mdata_dict, dev.device_name, acq_rate, max_cells, err\n",
     "\n",
@@ -360,8 +364,9 @@
     "            cal_db_interface, creation_time, \n",
     "            corr_bools, pc_bools)\n",
     "\n",
-    "results = view.map_sync(p, inp)\n",
-    "#results = list(map(p, inp))\n",
+    "with mp.Pool(processes=16) as pool:\n",
+    "    results = pool.map(p, inp)\n",
+    "\n",
     "mod_dev = dict()\n",
     "mdata_dict = dict()\n",
     "for r in results:\n",
@@ -433,13 +438,6 @@
     "with open(f\"{out_folder}/retrieved_constants.yml\",\"w\") as fyml:\n",
     "        yaml.safe_dump(time_summary, fyml)"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
diff --git a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
index 6c7a36b8f44b6dfabe902d4f38a5ac7504249481..695eb6634ff61193d8665a58812667821ad55a6c 100644
--- a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
+++ b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
@@ -4,59 +4,69 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Gain Characterization (Flat Fields) #\n",
-    "\n",
-    "The following code characterizes the gain of the AGIPD detector from flat field data, i.e. data with X-rays of defined intensity. This data should fullfil the following requirements:\n",
-    "\n",
-    "* intensity should be such that single photon peaks are visible\n",
-    "* data for all modules should be present\n",
-    "* no shadowing should occur on any of the modules\n",
-    "* each pixel should have at minimum arround 100 events per photon peak per memory cell\n",
-    "* if central beam edges are visible, they should not be significantly more intense\n",
-    "\n",
-    "Characterization is done by a weighted average algorithm, which evaluates the peak locations for all pixels\n",
-    "and memory cells of a given module. These locations are then fitted to a linear function of the average peak\n",
-    "location in each module, such that it yield a relative gain correction."
+    "# Gain Characterization #\n",
+    "\n"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T07:34:24.467244Z",
-     "start_time": "2019-09-13T07:34:24.457261Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "# the following lines should be adjusted depending on data\n",
-    "in_folder = '/gpfs/exfel/exp/MID/201931/p900091/raw/' # path to input data, required\n",
-    "modules = [3] # modules to work on, required, range allowed\n",
-    "out_folder = \"/gpfs/exfel/exp/MID/201931/p900091/usr/FF/2.2\" # path to output to, required\n",
-    "runs = [484, 485,] # runs to use, required, range allowed\n",
-    "sequences = [0,1,2,3]#,4,5,6,7,8] #,5,6,7,8,9,10] # sequences files to use, range allowed\n",
-    "cluster_profile = \"noDB\" # The ipcluster profile to use\n",
+    "in_folder = \"/gpfs/exfel/exp/SPB/202030/p900138/scratch/karnem/r0203_r0204_v01/\" # the folder to read histograms from, required\n",
+    "out_folder = \"/gpfs/exfel/exp/SPB/202030/p900138/scratch/karnem/r0203_r0204_v01/\"  # the folder to output to, required\n",
+    "hist_file_template = \"hists_m{:02d}_sum.h5\" # the template to use to access histograms\n",
+    "modules = [10] # modules to correct, set to -1 for all, range allowed\n",
+    "\n",
+    "image_data_path = \"/gpfs/exfel/exp/MID/202030/p900137/raw\" # Path to image data used to create histograms\n",
+    "run = 449 # of the run of image data used to create histograms\n",
+    "\n",
+    "karabo_id = \"MID_DET_AGIPD1M-1\" # karabo karabo_id\n",
+    "karabo_da = ['-1']  # a list of data aggregators names, Default [-1] for selecting all data aggregators\n",
+    "receiver_id = \"{}CH0\" # inset for receiver devices\n",
+    "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data\n",
+    "h5path = 'INSTRUMENT/{}/DET/{}:xtdf/' # path in the HDF5 file to images\n",
+    "h5path_idx = 'INDEX/{}/DET/{}:xtdf/' # path in the HDF5 file to images\n",
+    "h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP' # path to control information\n",
+    "karabo_id_control = \"MID_IRU_AGIPD1M1\" # karabo-id for control device\n",
+    "karabo_da_control = 'AGIPD1MCTRL00' # karabo DA for control infromation\n",
+    "\n",
+    "use_dir_creation_date = True # use the creation data of the input dir for database queries\n",
+    "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
+    "cal_db_timeout = 30000 # in milli seconds\n",
     "local_output = True # output constants locally\n",
     "db_output = False # output constants to database\n",
-    "bias_voltage = 300 # detector bias voltage\n",
-    "cal_db_interface = \"tcp://max-exfl016:8026#8035\"  # the database interface to use\n",
-    "mem_cells = 0  # number of memory cells used\n",
-    "interlaced = False # assume interlaced data format, for data prior to Dec. 2017\n",
-    "fit_hook = True # fit a hook function to medium gain slope\n",
-    "rawversion = 2 # RAW file format version\n",
-    "instrument = \"MID\"\n",
-    "photon_energy = 9.2 # the photon energy in keV\n",
-    "offset_store = \"\" # for file-baed access\n",
-    "high_res_badpix_3d = False # set this to True if you need high-resolution 3d bad pixel plots. Runtime: ~ 1h\n",
-    "db_input = True # retreive data from calibration database, setting offset-store will overwrite this\n",
-    "deviation_threshold = 0.75 # peaks with an absolute location deviation larger than the medium are are considere bad\n",
-    "acqrate = 0. # acquisition rate\n",
-    "use_dir_creation_date = True\n",
-    "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00\n",
-    "gain_setting = 0.1 # gain setting can have value 0 or 1, Default=0.1 for no (None) gain-setting\n",
-    "karabo_da_control = \"AGIPD1MCTRL00\" # karabo DA for control infromation\n",
-    "h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP_TEST' # path to control information"
+    "\n",
+    "# Fit parameters\n",
+    "peak_range = [-30, 30, 35, 70, 95, 135, 145, 220] # where to look for the peaks, [a0, b0, a1, b1, ...] exactly 8 elements\n",
+    "peak_width_range = [0, 30, 0, 35, 0, 40, 0, 45] # fit limits on the peak widths, [a0, b0, a1, b1, ...] exactly 8 elements\n",
+    "peak_norm_range = [0.0, -1, 0, -1, 0, -1, 0, -1] #  \n",
+    "\n",
+    "# Bad-pixel thresholds (gain evaluation error). Contribute to BadPixel bit \"Gain_Evaluation_Error\"\n",
+    "peak_lim = [-30, 30] # Limit of position of noise peak\n",
+    "d0_lim = [10, 80] # hard limits for distance between noise and first peak\n",
+    "peak_width_lim = [0.9, 1.55, 0.95, 1.65] # hard limits on the peak widths for first and second peak, in units of the noise peak. 4 parameters.\n",
+    "chi2_lim = [0, 3.0] # Hard limit on chi2/nDOF value\n",
+    "\n",
+    "intensity_lim = 15 # Threshold on standard deviation of a histogram in ADU. Contribute to BadPixel bit \"No_Entry\"\n",
+    "gain_lim = [0.85, 1.15] # Threshold on gain in relative number. Contribute to BadPixel bit \"Gain_deviation\"\n",
+    "\n",
+    "cell_range = [1, 3] # range of cell to be considered, [0,0] for all\n",
+    "pixel_range = [0, 0, 32, 32] # range of pixels x1,y1,x2,y2 to consider [0,0,512,128] for all\n",
+    "max_bins = 0 # Maximum number of bins to consider, 0 for all bins\n",
+    "batch_size = [1, 8, 8] # batch size: [cell,x,y]\n",
+    "fit_range = [0, 0] # range of a histogram considered for fitting in ADU. Dynamically evaluated in case [0,0]\n",
+    "n_peaks_fit = 4 # Number of gaussian peaks to fit including noise peak\n",
+    "fix_peaks = False # Fix distance between photon peaks\n",
+    "do_minos = False # This is additional feature of minuit to evaluate errors. \n",
+    "\n",
+    "# Detector conditions\n",
+    "max_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
+    "bias_voltage = 0  # Bias voltage\n",
+    "acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine\n",
+    "gain_setting = 0.1 # the gain setting, use 0.1 to try to auto-determine\n",
+    "photon_energy = 8.05 # photon energy in keV"
    ]
   },
   {
@@ -65,147 +75,46 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# std library imports\n",
-    "from datetime import datetime\n",
-    "import dateutil\n",
-    "from functools import partial\n",
-    "import warnings\n",
-    "warnings.filterwarnings('ignore')\n",
+    "import glob\n",
+    "from multiprocessing import Pool\n",
     "import os\n",
+    "import traceback\n",
+    "import warnings\n",
     "\n",
     "import h5py\n",
-    "# numpy and matplot lib specific\n",
-    "import numpy as np\n",
-    "import matplotlib\n",
-    "matplotlib.use(\"Agg\")\n",
+    "from iminuit import Minuit\n",
     "import matplotlib.pyplot as plt\n",
-    "%matplotlib inline\n",
-    "\n",
-    "# parallel processing via ipcluster\n",
-    "# make sure a cluster is running with ipcluster start --n=32, give it a while to start\n",
-    "from ipyparallel import Client\n",
-    "\n",
-    "client = Client(profile=cluster_profile)\n",
-    "view = client[:]\n",
-    "view.use_dill()\n",
-    "\n",
-    "# pyDetLib imports\n",
-    "import XFELDetAna.xfelpycaltools as xcal\n",
+    "import numpy as np\n",
+    "import sharedmem\n",
+    "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
+    "from XFELDetAna.plotting.simpleplot import simplePlot\n",
     "import XFELDetAna.xfelpyanatools as xana\n",
-    "from XFELDetAna.util import env\n",
-    "env.iprofile = cluster_profile\n",
-    "profile = cluster_profile\n",
     "\n",
-    "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
-    "from cal_tools.agipdlib import get_num_cells, get_acq_rate, get_gain_setting\n",
-    "from cal_tools.enums import BadPixels\n",
-    "from cal_tools.influx import InfluxLogger\n",
-    "from cal_tools.plotting import show_overview, plot_badpix_3d\n",
-    "from cal_tools.tools import gain_map_files, parse_runs, run_prop_seq_from_path, get_notebook_name, get_dir_creation_date, get_random_db_interface\n"
+    "from cal_tools.ana_tools import save_dict_to_hdf5, get_range\n",
+    "from cal_tools.agipdlib import get_bias_voltage\n",
+    "from cal_tools.agipdutils_ff import (\n",
+    "    any_in, gaussian, gaussian_sum, get_mask,\n",
+    "    get_starting_parameters, set_par_limits, fit_n_peaks\n",
+    ")\n",
+    "from cal_tools.enums import BadPixelsFF\n",
+    "\n",
+    "# %load_ext autotime\n",
+    "%matplotlib inline\n",
+    "warnings.filterwarnings('ignore')"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T07:34:24.830847Z",
-     "start_time": "2019-09-13T07:34:24.745094Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "# usually no need to change these lines\n",
-    "sensor_size = [128, 512]\n",
-    "block_size = [128, 512]\n",
-    "QUADRANTS = 4\n",
-    "MODULES_PER_QUAD = 4\n",
-    "DET_FILE_INSET = \"AGIPD\"\n",
-    "\n",
-    "# Define constant creation time.\n",
-    "if creation_time:\n",
-    "    try:\n",
-    "        creation_time = datetime.strptime(creation_time, '%Y-%m-%d %H:%M:%S.%f')\n",
-    "    except Exception as e:\n",
-    "        print(f\"creation_time value error: {e}.\" \n",
-    "               \"Use same format as YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00/n\")\n",
-    "        creation_time = None\n",
-    "        print(\"Given creation time wont be used.\")\n",
-    "else:\n",
-    "    creation_time = None\n",
-    "\n",
-    "if not creation_time and use_dir_creation_date:\n",
-    "    ntries = 100\n",
-    "    while ntries > 0:\n",
-    "        try:\n",
-    "            creation_time = get_dir_creation_date(in_folder, runs[0])\n",
-    "            break\n",
-    "        except OSError:\n",
-    "            pass\n",
-    "        ntries -= 1\n",
-    "    \n",
-    "print(\"Using {} as creation time\".format(creation_time))\n",
-    "    \n",
-    "runs = parse_runs(runs)\n",
-    "\n",
-    "if offset_store != \"\":\n",
-    "    db_input = False\n",
-    "else:\n",
-    "    db_input = True \n",
-    "\n",
-    "limit_trains = 20\n",
-    "limit_trains_eval = None\n",
-    "\n",
-    "print(\"Parameters are:\")\n",
-    "\n",
-    "print(\"Runs: {}\".format(runs))\n",
-    "print(\"Modules: {}\".format(modules))\n",
-    "print(\"Sequences: {}\".format(sequences))\n",
-    "print(\"Using DB: {}\".format(db_output))\n",
-    "\n",
-    "\n",
-    "if instrument == \"SPB\":\n",
-    "    loc = \"SPB_DET_AGIPD1M-1\"\n",
-    "    dinstance = \"AGIPD1M1\"\n",
-    "    karabo_id_control = \"SPB_IRU_AGIPD1M1\"\n",
-    "else:\n",
-    "    loc = \"MID_DET_AGIPD1M-1\"\n",
-    "    dinstance = \"AGIPD1M2\"\n",
-    "    karabo_id_control = \"MID_EXP_AGIPD1M1\"\n",
-    "\n",
-    "cal_db_interface = get_random_db_interface(cal_db_interface)\n",
-    "\n",
-    "# these lines can usually stay as is\n",
-    "fbase = \"{}/{{}}/RAW-{{}}-AGIPD{{:02d}}-S{{:05d}}.h5\".format(in_folder)\n",
-    "gains = np.arange(3)\n",
-    "\n",
-    "\n",
-    "run, prop, seq = run_prop_seq_from_path(in_folder)\n",
-    "logger = InfluxLogger(detector=\"AGIPD\", instrument=instrument, mem_cells=mem_cells,\n",
-    "                      notebook=get_notebook_name(), proposal=prop)\n",
-    "\n",
-    "# extract the memory cells and acquisition rate from \n",
-    "# the file of the first module, first sequence and first run\n",
-    "channel = 0\n",
-    "fname = fbase.format(runs[0], runs[0].upper(), channel, sequences[0])\n",
-    "if acqrate == 0.:\n",
-    "    acqrate = get_acq_rate((fname, loc, channel))\n",
-    "    \n",
-    "\n",
-    "if mem_cells == 0:\n",
-    "    cells = get_num_cells(fname, loc, channel)\n",
-    "    mem_cells = cells  # avoid setting twice\n",
-    "    \n",
-    "\n",
-    "IL_MODE = interlaced\n",
-    "max_cells = mem_cells if not interlaced else mem_cells*2\n",
-    "memory_cells = mem_cells\n",
-    "print(\"Interlaced mode: {}\".format(IL_MODE))\n",
-    "\n",
-    "cells = np.arange(max_cells)\n",
-    "\n",
-    "print(f\"Acquisition rate and memory cells set from file {fname} are \" \n",
-    "      f\"{acqrate} MHz and {max_cells}, respectively\")"
+    "peak_range = np.reshape(peak_range,(4,2))\n",
+    "peak_width_range = np.reshape(peak_width_range,(4,2))\n",
+    "peak_width_lim = np.reshape(peak_width_lim,(2,2))\n",
+    "peak_norm_range = [None if x == -1 else x for x in peak_norm_range]\n",
+    "peak_norm_range = np.reshape(peak_norm_range,(4,2))\n",
+    "module = modules[0]"
    ]
   },
   {
@@ -214,912 +123,371 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "control_fname = f'{in_folder}/{runs[0]}/RAW-{runs[0].upper()}-{karabo_da_control}-S00000.h5'\n",
-    "\n",
-    "if \"{\" in h5path_ctrl:\n",
-    "    h5path_ctrl = h5path_ctrl.format(karabo_id_control)\n",
-    "\n",
-    "if gain_setting == 0.1:\n",
-    "    if creation_time.replace(tzinfo=None) < dateutil.parser.parse('2020-01-31'):\n",
-    "        print(\"Set gain-setting to None for runs taken before 2020-01-31\")\n",
-    "        gain_setting = None\n",
-    "    else:\n",
-    "        try:\n",
-    "            gain_setting = get_gain_setting(control_fname, h5path_ctrl)\n",
-    "        except Exception as e:\n",
-    "            print(f'Error while reading gain setting from: \\n{control_fname}')\n",
-    "            print(e)\n",
-    "            print(\"Gain setting is not found in the control information\")\n",
-    "            print(\"Data will not be processed\")\n",
-    "            sequences = []\n",
-    "print(f\"Gain setting: {gain_setting}\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "For the characterization offset maps for each module are needed. The are read in either locally or through the XFEL calibration database."
+    "if bias_voltage == 0:\n",
+    "    # Read the bias voltage from files, if recorded.\n",
+    "    # If not available, make use of the historical voltage the detector is running at\n",
+    "    control_filename = f'{image_data_path}/r{run:04d}/RAW-R{run:04d}-{karabo_da_control}-S00000.h5'\n",
+    "    bias_voltage = get_bias_voltage(control_filename, karabo_id_control)\n",
+    "    bias_voltage = bias_voltage if bias_voltage is not None else 300\n",
+    "print(f\"Bias voltage: {bias_voltage}V\")"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T07:34:45.876476Z",
-     "start_time": "2019-09-13T07:34:25.656979Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "from dateutil import parser\n",
-    "offset_g = {}\n",
-    "noise_g = {}\n",
-    "thresholds_g = {}\n",
-    "pc_g = {}\n",
-    "if not db_input:\n",
-    "    print(\"Offset, noise and thresholds have been read in from: {}\".format(offset_store))\n",
-    "    store_file = h5py.File(offset_store, \"r\")\n",
-    "    for i in modules:\n",
-    "        qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
-    "        offset_g[qm] = np.array(store_file[\"{}/Offset/0/data\".format(qm)])\n",
-    "        noise_g[qm] = np.array(store_file[\"{}/Noise/0/data\".format(qm)])\n",
-    "        thresholds_g[qm] = np.array(store_file[\"{}/Threshold/0/data\".format(qm)])\n",
-    "    store_file.close()\n",
-    "else:\n",
-    "    print(\"Offset, noise and thresholds have been read in from calibration database:\")\n",
-    "    first_ex = True\n",
-    "    for i in modules:\n",
-    "        qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
-    "        metadata = ConstantMetaData()\n",
-    "        offset = Constants.AGIPD.Offset()\n",
-    "        metadata.calibration_constant = offset\n",
-    "        det = getattr(Detectors, dinstance)\n",
-    "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
-    "                                          acquisition_rate=acqrate, gain_setting=gain_setting)\n",
-    "        metadata.detector_condition = condition\n",
-    "\n",
-    "        # specify the a version for this constant\n",
-    "        if creation_time is None:\n",
-    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
-    "        else:\n",
-    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
-    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=300000)\n",
-    "        offset_g[qm] = offset.data\n",
-    "        if first_ex:\n",
-    "            print(\"Offset map was injected on: {}\".format(metadata.calibration_constant_version.begin_at))\n",
-    "        \n",
-    "        metadata = ConstantMetaData()\n",
-    "        noise = Constants.AGIPD.Noise()\n",
-    "        metadata.calibration_constant = noise\n",
-    "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
-    "                                          acquisition_rate=acqrate, gain_setting=gain_setting)\n",
-    "        metadata.detector_condition = condition\n",
-    "\n",
-    "        # specify the a version for this constant\n",
-    "        if creation_time is None:\n",
-    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
-    "        else:\n",
-    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
-    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=300000)\n",
-    "        noise_g[qm] = noise.data\n",
-    "        if first_ex:\n",
-    "            print(\"Noise map was injected on: {}\".format(metadata.calibration_constant_version.begin_at))\n",
-    "        \n",
-    "        metadata = ConstantMetaData()\n",
-    "        thresholds = Constants.AGIPD.ThresholdsDark()\n",
-    "        metadata.calibration_constant = thresholds\n",
-    "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
-    "                                          acquisition_rate=acqrate, gain_setting=gain_setting)\n",
-    "        metadata.detector_condition = condition\n",
-    "\n",
-    "        # specify the a version for this constant\n",
-    "        if creation_time is None:\n",
-    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
-    "        else:\n",
-    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
-    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=300000)\n",
-    "        thresholds_g[qm] = thresholds.data\n",
-    "        if first_ex:\n",
-    "            print(\"Threshold map was injected on: {}\".format(metadata.calibration_constant_version.begin_at))\n",
-    "            \n",
-    "            \n",
-    "        metadata = ConstantMetaData()\n",
-    "        pc = Constants.AGIPD.SlopesPC()\n",
-    "        metadata.calibration_constant = pc\n",
-    "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
-    "                                          acquisition_rate=acqrate, gain_setting=gain_setting)\n",
-    "        metadata.detector_condition = condition\n",
-    "\n",
-    "        # specify the a version for this constant\n",
-    "        if creation_time is None:\n",
-    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
-    "        else:\n",
-    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
-    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=300000)\n",
-    "        pc_g[qm] = np.nanmedian(pc.data[0,...])/pc.data\n",
-    "        if first_ex:\n",
-    "            print(\"PC map was injected on: {}\".format(metadata.calibration_constant_version.begin_at))\n",
-    "            first_ex = False\n",
-    "            "
-   ]
-  },
-  {
-   "cell_type": "markdown",
    "metadata": {},
+   "outputs": [],
    "source": [
-    "## Initial peak estimates ##\n",
-    "\n",
-    "First initial peak estimates need to be defined. This is done by inspecting histograms created from (a subset of) the offset-corrected data for peak locations and peak heights."
+    "def idx_gen(batch_start, batch_size):\n",
+    "    \"\"\"\n",
+    "    This generator iterate across pixels and memory cells starting\n",
+    "    from batch_start until batch_start+batch_size\n",
+    "    \"\"\"\n",
+    "    for c_idx in range(batch_start[0], batch_start[0]+batch_size[0]):\n",
+    "        for x_idx in range(batch_start[1], batch_start[1]+batch_size[1]):\n",
+    "            for y_idx in range(batch_start[2], batch_start[2]+batch_size[2]):\n",
+    "                yield(c_idx, x_idx, y_idx)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T07:51:37.815384Z",
-     "start_time": "2019-09-13T07:34:45.879121Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "def hist_single_module(fbase, runs, sequences, sensor_size, memory_cells, block_size,\n",
-    "                       il_mode, limit_trains, profile, rawversion, instrument, inp):\n",
-    "    \"\"\" This function calculates a per-pixel histogram for a single module\n",
-    "    \n",
-    "    Runs and sequences give the data to calculate histogram from\n",
-    "    \"\"\"\n",
-    "    channel, offset, thresholds, pc, noise = inp\n",
-    "    \n",
-    "    import XFELDetAna.xfelpycaltools as xcal\n",
-    "    import numpy as np\n",
-    "    import h5py\n",
-    "    from XFELDetAna.util import env\n",
-    "    \n",
-    "    \n",
-    "    env.iprofile = profile\n",
-    "    \n",
-    "    def baseline_correct_via_noise(d, noise, g, baseline_corr_noise_threshold=1000):\n",
-    "        \"\"\" Correct baseline shifts by evaluating position of the noise peak\n",
-    "\n",
-    "        :param: d: the data to correct, should be a single image\n",
-    "        :param noise: the mean noise for the cell id of `d`\n",
-    "        :param g: gain array matching `d` array\n",
-    "\n",
-    "        Correction is done by identifying the left-most (significant) peak\n",
-    "        in the histogram of `d` and shifting it to be centered on 0.\n",
-    "        This is done via a continous wavelet transform (CWT), using a Ricker\n",
-    "        wavelet.\n",
-    "\n",
-    "        Only high gain data is evaulated, and data larger than 50 ADU,\n",
-    "        equivalent of roughly a 9 keV photon is ignored.\n",
-    "\n",
-    "        Two passes are executed: the first shift is accurate to 10 ADU and\n",
-    "        will catch baseline shifts smaller then -2000 ADU. CWT is evaluated\n",
-    "        for peaks of widths one, three and five time the noise.\n",
-    "        The baseline is then shifted by the position of the summed CWT maximum.\n",
-    "\n",
-    "        In a second pass histogram is evaluated within a range of\n",
-    "        +- 5*noise of the initial shift value, for peak of width noise.\n",
-    "\n",
-    "        Baseline shifts larger than the maximum threshold or positive shifts\n",
-    "        larger 10 are discarded, and the original data is returned, otherwise\n",
-    "        the shift corrected data is returned.\n",
-    "\n",
-    "        \"\"\"\n",
-    "        import copy\n",
-    "        from scipy.signal import cwt, ricker\n",
-    "        # we work on a copy to be able to filter values by setting them to\n",
-    "        # np.nan\n",
-    "        dd = copy.copy(d)\n",
-    "        dd[g != 0] = np.nan  # only high gain data\n",
-    "        dd[dd > 50] = np.nan  # only noise data\n",
-    "        h, e = np.histogram(dd, bins=210, range=(-2000, 100))\n",
-    "        c = (e[1:] + e[:-1]) / 2\n",
-    "\n",
-    "        try:\n",
-    "            cwtmatr = cwt(h, ricker, [noise, 3. * noise, 5. * noise])\n",
-    "        except:\n",
-    "            return d\n",
-    "        cwtall = np.sum(cwtmatr, axis=0)\n",
-    "        marg = np.argmax(cwtall)\n",
-    "        pc = c[marg]\n",
-    "\n",
-    "        high_res_range = (dd > pc - 5 * noise) & (dd < pc + 5 * noise)\n",
-    "        dd[~high_res_range] = np.nan\n",
-    "        h, e = np.histogram(dd, bins=200,\n",
-    "                            range=(pc - 5 * noise, pc + 5 * noise))\n",
-    "        c = (e[1:] + e[:-1]) / 2\n",
-    "        try:\n",
-    "            cwtmatr = cwt(h, ricker, [noise, ])\n",
-    "        except:\n",
-    "            return d\n",
-    "        marg = np.argmax(cwtmatr)\n",
-    "        pc = c[marg]\n",
-    "        # too large shift to be easily decernable via noise\n",
-    "        if pc > 10 or pc < -baseline_corr_noise_threshold:\n",
-    "            return d\n",
-    "        return d - pc\n",
+    "n_pixels_x = pixel_range[2]-pixel_range[0]\n",
+    "n_pixels_y = pixel_range[3]-pixel_range[1]\n",
     "\n",
+    "hist_data = {}\n",
+    "with h5py.File(f\"{in_folder}/{hist_file_template.format(module)}\", 'r') as hf:\n",
+    "    hist_data['cellId'] = np.array(hf['cellId'][()])\n",
+    "    hist_data['hRange'] = np.array(hf['hRange'][()])\n",
+    "    hist_data['nBins'] = np.array(hf['nBins'][()])\n",
     "    \n",
-    "    # function needs to be inline for parallell processing\n",
-    "    def read_fun(filename, channel):\n",
-    "        \"\"\" A reader function used by pyDetLib\n",
-    "        \"\"\"\n",
-    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
-    "        if rawversion == 2:\n",
-    "            count = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/count\".format(instrument, channel)])\n",
-    "            first = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/first\".format(instrument, channel)])\n",
-    "            last_index = int(first[count != 0][-1]+count[count != 0][-1])\n",
-    "            first_index = int(first[count != 0][0])\n",
-    "        else:\n",
-    "            status = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(instrument, channel)])\n",
-    "            if np.count_nonzero(status != 0) == 0:\n",
-    "                return\n",
-    "            last = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(instrument, channel)])\n",
-    "            last_index = int(last[status != 0][-1])\n",
-    "            first_index = int(last[status != 0][0])\n",
-    "        if limit_trains is not None:\n",
-    "            last_index = min(limit_trains*memory_cells+first_index, last_index)\n",
-    "        \n",
-    "        im = np.array(infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(instrument, channel)][first_index:last_index,...])    \n",
-    "        carr = infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(instrument, channel)][first_index:last_index]\n",
-    "        cells = np.squeeze(np.array(carr))\n",
-    "        infile.close()\n",
+    "    if cell_range == [0,0]:\n",
+    "        cell_range[1] = hist_data['cellId'].shape[0]\n",
     "        \n",
-    "        if il_mode:\n",
-    "            ga = im[1::2, 0, ...]\n",
-    "            im = im[0::2, 0, ...].astype(np.float32)\n",
-    "        else:\n",
-    "            ga = im[:, 1, ...]\n",
-    "            im = im[:, 0, ...].astype(np.float32)\n",
-    "\n",
-    "        im = np.rollaxis(im, 2)\n",
-    "        im = np.rollaxis(im, 2, 1)\n",
-    "\n",
-    "        ga = np.rollaxis(ga, 2)\n",
-    "        ga = np.rollaxis(ga, 2, 1)\n",
-    "        return im, ga, cells\n",
-    "    \n",
-    "    offset_cor = xcal.OffsetCorrection(sensor_size,\n",
-    "                                       offset,\n",
-    "                                       nCells=memory_cells,\n",
-    "                                       blockSize=block_size,\n",
-    "                                       gains=[0,1,2])\n",
-    "    offset_cor.mapper = offset_cor._view.map_sync\n",
-    "    offset_cor.debug() # force non-parallel processing since outer function will run concurrently\n",
-    "    hist_calc = xcal.HistogramCalculator(sensor_size,\n",
-    "                                         bins=4000,\n",
-    "                                         range=(-4000, 8000),\n",
-    "                                         blockSize=block_size)\n",
-    "    hist_calc.mapper = hist_calc._view.map_sync\n",
-    "    hist_calc.debug() # force non-parallel processing since outer function will run concurrently\n",
-    "    for run in runs:\n",
-    "        for seq in sequences:\n",
-    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
-    "            d, ga, c = read_fun(fname, channel)\n",
-    "            vidx = (c < offset.shape[2]) & (c < thresholds.shape[2])\n",
-    "            c = c[vidx]\n",
-    "            d = d[:,:,vidx]\n",
-    "            ga = ga[:,:,vidx]\n",
-    "            # we need to do proper gain thresholding\n",
-    "            g = np.zeros(ga.shape, np.uint8)\n",
-    "            g[...] = 2\n",
-    "            \n",
-    "            g[ga < thresholds[...,c,1]] = 1\n",
-    "            g[ga < thresholds[...,c,0]] = 0\n",
-    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
-    "            for i in range(d.shape[2]):\n",
-    "                mn_noise = np.nanmean(noise[...,c[i]])\n",
-    "                d[...,i] = baseline_correct_via_noise(d[...,i],\n",
-    "                                                      mn_noise,\n",
-    "                                                      g[..., i])\n",
-    "            \n",
-    "            d *= np.moveaxis(pc[c,...], 0, 2)\n",
-    "            \n",
-    "            hist_calc.fill(d)\n",
-    "    h, e, c, _ = hist_calc.get()\n",
-    "    return h, e, c\n",
-    "\n",
-    "inp = []\n",
-    "for i in modules:\n",
-    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
-    "    inp.append((i, offset_g[qm], thresholds_g[qm], pc_g[qm][0,...], noise_g[qm][...,0]))\n",
-    "    \n",
-    "p = partial(hist_single_module, fbase, runs, sequences,\n",
-    "            sensor_size, memory_cells, block_size, IL_MODE, limit_trains, profile, rawversion, instrument)\n",
-    "res_uncorr = list(map(p, inp))\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T07:51:38.024035Z",
-     "start_time": "2019-09-13T07:51:37.818276Z"
-    },
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "d = []\n",
-    "qms = []\n",
-    "for i, r in enumerate(res_uncorr):\n",
-    "    ii = list(modules)[i]\n",
-    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
-    "    qms.append(qm)\n",
-    "    h, e, c = r \n",
-    "    d.append({\n",
-    "            'x': c, \n",
-    "            'y': h,\n",
-    "            'drawstyle': 'steps-mid'\n",
-    "        })\n",
+    "    if max_bins == 0:\n",
+    "        max_bins = hist_data['nBins']\n",
     "    \n",
-    "fig = xana.simplePlot(d, y_log=False,\n",
-    "                      figsize=\"2col\",\n",
-    "                      aspect=2,\n",
-    "                      x_range=(-50, 500),\n",
-    "                      x_label=\"Intensity (ADU)\",\n",
-    "                      y_label=\"Counts\")\n",
-    "\n",
-    "fig.savefig(\"{}/FF_module_{}_peak_pos.png\".format(out_folder, \",\".join(qms)))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T07:51:38.029704Z",
-     "start_time": "2019-09-13T07:51:38.026111Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "# these should be quite stable\n",
+    "    hist_data['cellId'] = hist_data['cellId'][cell_range[0]:cell_range[1]]\n",
+    "    hist_data['hist'] = np.array(hf['hist'][cell_range[0]:cell_range[1], :max_bins, :])\n",
     "\n",
-    "peak_estimates = [0, 55, 105, 155]\n",
-    "peak_heights = [5e7, 5e6, 1e6, 5e5]\n",
-    "peak_sigma = [5., 5.,5., 5.,]\n",
+    "n_cells = cell_range[1]-cell_range[0]\n",
+    "hist_data['hist'] = hist_data['hist'].reshape(n_cells, max_bins, 512, 128)\n",
+    "hist_data['hist'] = hist_data['hist'][:,:,pixel_range[0]:pixel_range[2],pixel_range[1]:pixel_range[3]]\n",
     "\n",
-    "print(\"Using the following peak estimates: {}\".format(peak_estimates))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "collapsed": true
-   },
-   "source": [
-    "## Calculate relative gain per module ##\n",
+    "print(f'Data shape {hist_data[\"hist\"].shape}')\n",
+    "    \n",
+    "bin_edges = np.linspace(hist_data['hRange'][0], hist_data['hRange'][1], int(hist_data['nBins']+1))\n",
+    "x = (bin_edges[1:] + bin_edges[:-1])[:max_bins] * 0.5\n",
+    "   \n",
     "\n",
-    "Using the so obtained estimates, we calculate the relative gain per module. For this we use the weighted average method implemented in pyDetLib."
+    "batches = []\n",
+    "for c_idx in range(0, n_cells, batch_size[0]):\n",
+    "    for x_idx in range(0, n_pixels_x, batch_size[1]):\n",
+    "        for y_idx in range(0, n_pixels_y, batch_size[2]):\n",
+    "            batches.append([c_idx,x_idx,y_idx])\n",
+    "        \n",
+    "print(f'Number of batches {len(batches)}')"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T08:16:50.652492Z",
-     "start_time": "2019-09-13T07:51:38.031787Z"
-    }
+    "scrolled": true
    },
    "outputs": [],
    "source": [
-    "block_size = [64, 64]\n",
-    "def relgain_single_module(fbase, runs, sequences, peak_estimates,\n",
-    "                          peak_heights, peak_sigma, memory_cells, sensor_size,\n",
-    "                          block_size, il_mode, profile, limit_trains_eval, rawversion, instrument, inp):\n",
-    "    \"\"\" A function for calculated the relative gain of a single AGIPD module\n",
-    "    \"\"\"\n",
-    "    \n",
-    "    # import needed inline for parallel processing\n",
-    "    import XFELDetAna.xfelpycaltools as xcal\n",
-    "    import numpy as np\n",
-    "    import h5py\n",
-    "    from XFELDetAna.util import env\n",
-    "    env.iprofile = profile\n",
-    "    \n",
-    "    channel, offset, thresholds, noise, pc = inp\n",
-    "    \n",
-    "    def baseline_correct_via_noise(d, noise, g, baseline_corr_noise_threshold=1000):\n",
-    "        \"\"\" Correct baseline shifts by evaluating position of the noise peak\n",
-    "\n",
-    "        :param: d: the data to correct, should be a single image\n",
-    "        :param noise: the mean noise for the cell id of `d`\n",
-    "        :param g: gain array matching `d` array\n",
-    "\n",
-    "        Correction is done by identifying the left-most (significant) peak\n",
-    "        in the histogram of `d` and shifting it to be centered on 0.\n",
-    "        This is done via a continous wavelet transform (CWT), using a Ricker\n",
-    "        wavelet.\n",
-    "\n",
-    "        Only high gain data is evaulated, and data larger than 50 ADU,\n",
-    "        equivalent of roughly a 9 keV photon is ignored.\n",
-    "\n",
-    "        Two passes are executed: the first shift is accurate to 10 ADU and\n",
-    "        will catch baseline shifts smaller then -2000 ADU. CWT is evaluated\n",
-    "        for peaks of widths one, three and five time the noise.\n",
-    "        The baseline is then shifted by the position of the summed CWT maximum.\n",
-    "\n",
-    "        In a second pass histogram is evaluated within a range of\n",
-    "        +- 5*noise of the initial shift value, for peak of width noise.\n",
-    "\n",
-    "        Baseline shifts larger than the maximum threshold or positive shifts\n",
-    "        larger 10 are discarded, and the original data is returned, otherwise\n",
-    "        the shift corrected data is returned.\n",
-    "\n",
-    "        \"\"\"\n",
-    "        import copy\n",
-    "        from scipy.signal import cwt, ricker\n",
-    "        # we work on a copy to be able to filter values by setting them to\n",
-    "        # np.nan\n",
-    "        dd = copy.copy(d)\n",
-    "        dd[g != 0] = np.nan  # only high gain data\n",
-    "        dd[dd > 50] = np.nan  # only noise data\n",
-    "        h, e = np.histogram(dd, bins=210, range=(-2000, 100))\n",
-    "        c = (e[1:] + e[:-1]) / 2\n",
-    "\n",
-    "        try:\n",
-    "            cwtmatr = cwt(h, ricker, [noise, 3. * noise, 5. * noise])\n",
-    "        except:\n",
-    "            return d\n",
-    "        cwtall = np.sum(cwtmatr, axis=0)\n",
-    "        marg = np.argmax(cwtall)\n",
-    "        pc = c[marg]\n",
-    "\n",
-    "        high_res_range = (dd > pc - 5 * noise) & (dd < pc + 5 * noise)\n",
-    "        dd[~high_res_range] = np.nan\n",
-    "        h, e = np.histogram(dd, bins=200,\n",
-    "                            range=(pc - 5 * noise, pc + 5 * noise))\n",
-    "        c = (e[1:] + e[:-1]) / 2\n",
+    "def fit_batch(batch_start):\n",
+    "    current_result = {}\n",
+    "    prev = None\n",
+    "    for c_idx, x_idx, y_idx in idx_gen(batch_start, batch_size):\n",
     "        try:\n",
-    "            cwtmatr = cwt(h, ricker, [noise, ])\n",
-    "        except:\n",
-    "            return d\n",
-    "        marg = np.argmax(cwtmatr)\n",
-    "        pc = c[marg]\n",
-    "        # too large shift to be easily decernable via noise\n",
-    "        if pc > 10 or pc < -baseline_corr_noise_threshold:\n",
-    "            return d\n",
-    "        return d - pc\n",
-    "\n",
-    "    \n",
-    "    # function needs to be inline for parallell processing\n",
-    "    def read_fun(filename, channel):\n",
-    "        \"\"\" A reader function used by pyDetLib\n",
-    "        \"\"\"\n",
-    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
-    "        if rawversion == 2:\n",
-    "            count = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/count\".format(instrument, channel)])\n",
-    "            first = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/first\".format(instrument, channel)])\n",
-    "            last_index = int(first[count != 0][-1]+count[count != 0][-1])\n",
-    "            first_index = int(first[count != 0][0])\n",
-    "        else:\n",
-    "            status = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(instrument, channel)])\n",
-    "            if np.count_nonzero(status != 0) == 0:\n",
-    "                return\n",
-    "            last = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(instrument, channel)])\n",
-    "            last_index = int(last[status != 0][-1])\n",
-    "            first_index = int(last[status != 0][0])\n",
-    "        if limit_trains is not None:\n",
-    "            last_index = min(limit_trains*memory_cells+first_index, last_index)\n",
-    "        im = np.array(infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(instrument, channel)][first_index:last_index,...])    \n",
-    "        carr = infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(instrument, channel)][first_index:last_index]\n",
-    "        cells = np.squeeze(np.array(carr))\n",
-    "        infile.close()\n",
+    "            y = hist_data['hist'][c_idx, :, x_idx, y_idx]\n",
+    "\n",
+    "            if prev is None:\n",
+    "                prev, _ = get_starting_parameters(x, y, peak_range, n_peaks=n_peaks_fit)\n",
+    "\n",
+    "            if fit_range == [0, 0]:\n",
+    "                frange = (prev[f'g0mean']-2*prev[f'g0sigma'],\n",
+    "                          prev[f'g{n_peaks_fit-1}mean'] + prev[f'g{n_peaks_fit-1}sigma'])\n",
+    "            else:\n",
+    "                frange = fit_range\n",
+    "\n",
+    "            set_par_limits(prev, peak_range, peak_norm_range,\n",
+    "                           peak_width_range, n_peaks_fit)\n",
+    "            minuit = fit_n_peaks(x, y, prev, frange,\n",
+    "                                 do_minos=do_minos, n_peaks=n_peaks_fit,\n",
+    "                                 fix_d01=fix_peaks)\n",
+    "\n",
+    "            ndof = np.rint(frange[1]-frange[0])-len(minuit.args)\n",
+    "            current_result['chi2_ndof'] = minuit.fval/ndof\n",
+    "            current_result.update(minuit.fitarg)\n",
+    "            current_result.update(minuit.get_fmin())\n",
+    "\n",
+    "            fit_result['chi2_ndof'][c_idx, x_idx, y_idx] = current_result['chi2_ndof']\n",
+    "            fit_result['g0mean'][c_idx, x_idx, y_idx] = current_result['g0mean']\n",
+    "            fit_result['g1mean'][c_idx, x_idx, y_idx] = current_result['g1mean']\n",
+    "            fit_result['g2mean'][c_idx, x_idx, y_idx] = current_result['g2mean']\n",
+    "            fit_result['g3mean'][c_idx, x_idx, y_idx] = current_result['g3mean']\n",
+    "\n",
+    "            for key in minuit.fitarg.keys():\n",
+    "                if key in fit_result:\n",
+    "                    fit_result[key][c_idx, x_idx, y_idx] = minuit.fitarg[key]\n",
+    "\n",
+    "            fit_result['mask'][c_idx, x_idx, y_idx] = get_mask(current_result,\n",
+    "                                                                    peak_lim,\n",
+    "                                                                    d0_lim, chi2_lim,\n",
+    "                                                                    peak_width_lim)\n",
+    "        except Exception as e:\n",
+    "            fit_result['mask'][c_idx, x_idx,\n",
+    "                                    y_idx] = BadPixelsFF.FIT_FAILED.value\n",
+    "            print(c_idx, x_idx, y_idx, e, traceback.format_exc())\n",
     "\n",
-    "        if il_mode:\n",
-    "            ga = im[1::2, 0, ...]\n",
-    "            im = im[0::2, 0, ...].astype(np.float32)\n",
+    "        if fit_result['mask'][c_idx, x_idx, y_idx] == 0:\n",
+    "            prev = minuit.fitarg\n",
     "        else:\n",
-    "            ga = im[:, 1, ...]\n",
-    "            im = im[:, 0, ...].astype(np.float32)\n",
-    "            \n",
-    "        im = np.rollaxis(im, 2)\n",
-    "        im = np.rollaxis(im, 2, 1)\n",
-    "\n",
-    "        ga = np.rollaxis(ga, 2)\n",
-    "        ga = np.rollaxis(ga, 2, 1)\n",
-    "        return im, ga, cells\n",
-    "    \n",
-    "    offset_cor = xcal.OffsetCorrection(sensor_size, offset, nCells=memory_cells,\n",
-    "                                       blockSize=block_size, gains=[0,1,2])\n",
-    "    offset_cor.mapper = offset_cor._view.map_sync\n",
-    "\n",
-    "    rel_gain = xcal.GainMapCalculator(sensor_size,\n",
-    "                                      peak_estimates,\n",
-    "                                      peak_sigma,\n",
-    "                                      nCells=memory_cells,\n",
-    "                                      peakHeights = peak_heights,\n",
-    "                                      noiseMap=noise,\n",
-    "                                      deviationType=\"relative\")\n",
-    "    rel_gain.mapper = rel_gain._view.map_sync\n",
-    "    for run in runs:\n",
-    "        for seq in sequences:\n",
-    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
-    "            d, ga, c = read_fun(fname, channel)\n",
-    "            # we need to do proper gain thresholding\n",
-    "            vidx = (c < offset.shape[2]) & (c < thresholds.shape[2])\n",
-    "            c = c[vidx]\n",
-    "            d = d[:,:,vidx]\n",
-    "            ga = ga[:,:,vidx]\n",
-    "            # we need to do proper gain thresholding\n",
-    "            g = np.zeros(ga.shape, np.uint8)\n",
-    "            g[...] = 2\n",
-    "            \n",
-    "            g[ga < thresholds[...,c,1]] = 1\n",
-    "            g[ga < thresholds[...,c,0]] = 0\n",
-    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
-    "            \n",
-    "            for i in range(d.shape[2]):\n",
-    "                mn_noise = np.nanmean(noise[...,c[i]])\n",
-    "                d[...,i] = baseline_correct_via_noise(d[...,i],\n",
-    "                                                      mn_noise,\n",
-    "                                                      g[..., i])\n",
-    "            \n",
-    "            d *= np.moveaxis(pc[c,...], 0, 2)\n",
-    "            rel_gain.fill(d, cellTable=c)\n",
-    "    \n",
-    "    gain_map = rel_gain.get()\n",
-    "    gain_map_func = rel_gain.getUsingFunc(inverse=False)\n",
-    "    \n",
-    "    pks, stds = rel_gain.getRawPeaks()\n",
-    "    return gain_map, pks, stds, gain_map_func\n",
-    "\n",
-    "inp = []\n",
-    "for i in modules:\n",
-    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
-    "    inp.append((i, offset_g[qm], thresholds_g[qm], noise_g[qm][...,0], pc_g[qm][0,...]))\n",
-    "start = datetime.now()\n",
-    "p = partial(relgain_single_module, fbase, runs, sequences,\n",
-    "            peak_estimates, peak_heights, peak_sigma, memory_cells,\n",
-    "            sensor_size, block_size, IL_MODE, profile, limit_trains_eval, rawversion, instrument)\n",
-    "res_gain = list(map(p, inp)) # don't run concurently as inner function are parelllized\n",
-    "duration = (datetime.now()-start).total_seconds()\n",
-    "logger.runtime_summary_entry(success=True, runtime=duration)\n",
-    "logger.send()"
+    "            prev = None"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Finally, we inspect the results, by plotting the number of entries, peak locations and resulting gain maps for each peak. In the course of doing so, we identify bad pixels by either having 0 entries for a peak, or having `nan` values for the peak location."
+    "# Single fit ##\n",
+    "\n",
+    "Left plot shows starting parameters for fitting. Right plot shows result of the fit. Errors are evaluated with minos."
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T08:16:54.261782Z",
-     "start_time": "2019-09-13T08:16:50.657594Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "\n",
-    "gain_m = {}\n",
-    "flatsff = {}\n",
-    "gainoff_g = {}\n",
-    "entries_g = {}\n",
-    "peaks_g = {}\n",
-    "mask_g = {}\n",
-    "cell_to_preview = 4\n",
-    "masks_eval_peaks = [1, 2]\n",
-    "global_eval_peaks = [1]\n",
-    "global_meds = {}\n",
-    "\n",
-    "for i, r in enumerate(res_gain):\n",
-    "    ii = list(modules)[i]\n",
-    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
-    "    gain, pks, std, gfunc = r\n",
-    "    gains, errors, chisq, valid, max_dev, stats = gfunc\n",
-    "    _, entries, stds, sow = gain\n",
-    "    gain_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
-    "    gain_mdb = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
-    "    entries_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells, 5))    \n",
-    "    gainoff_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
-    "    mask_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells), np.uint32)\n",
-    "    \n",
-    "    gainoff_g[qm] = gainoff_db\n",
-    "    gain_m[qm] = gain_mdb\n",
-    "    entries_g[qm] = entries_db\n",
-    "    peaks_g[qm] = pks\n",
-    "    \n",
-    "    # create a mask for unregular pixels\n",
-    "    # first bit set if first peak has nan entries\n",
-    "    for pk in masks_eval_peaks:\n",
-    "        mask_db[~(np.isfinite(gain_mdb) & np.isfinite(gainoff_db))] |= BadPixels.FF_GAIN_EVAL_ERROR.value\n",
-    "        mask_db[(np.abs(1-gain_mdb/np.nanmedian(gain_mdb) > deviation_threshold) )] |= BadPixels.FF_GAIN_DEVIATION.value\n",
-    "    # second bit set if entries are 0 for first peak\n",
-    "    mask_db[entries[...,1] == 0] |= BadPixels.FF_NO_ENTRIES.value\n",
-    "    zero_oc = np.count_nonzero((entries > 0).astype(np.int), axis=3)\n",
-    "    mask_db[zero_oc <= len(peak_estimates)-2] |= BadPixels.FF_NO_ENTRIES.value \n",
-    "    # third bit set if entries of a given adc show significant noise\n",
-    "    stds = []\n",
-    "    for ii in range(8):\n",
-    "        for jj in range(8):\n",
-    "            stds.append(np.std(entries_db[ii*16:(ii+1)*16,jj*64+2:(jj+1)*64-2,:,1], axis=(0,1)))\n",
-    "    avg_stds = np.median(np.array(stds), axis=0)\n",
-    "    \n",
-    "    for ii in range(8):\n",
-    "        for jj in range(8):\n",
-    "            std = np.std(entries_db[ii*16:(ii+1)*16,jj*64+2:(jj+1)*64-2,:,1], axis=(0,1))\n",
-    "            if np.any(std > 5*avg_stds):\n",
-    "                mask_db[ii*16:(ii+1)*16,jj*64:(jj+1)*64,std > 5*avg_stds] |= BadPixels.FF_GAIN_DEVIATION\n",
-    "                \n",
-    "    \n",
-    "    mask_g[qm] = mask_db\n",
-    "    \n",
-    "    flat = np.zeros((gains.shape[0], gains.shape[1], memory_cells, 3))\n",
-    "    for j in range(2,len(peak_estimates)):\n",
-    "        flat[...,j-2] = np.mean(entries[...,j]/np.mean(entries[...,j]))\n",
-    "    flat = np.mean(flat, axis=3)\n",
-    "    #flat_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
-    "    #for j in range(2):\n",
-    "    #    flat_db[...,j::2] = flat\n",
-    "    flatsff[qm] = flat\n",
-    "    \n",
-    "    global_meds[qm] = np.nanmedian(pks[...,global_eval_peaks][np.max(mask_db, axis=2) != 0])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
    "metadata": {},
+   "outputs": [],
    "source": [
-    "## Evaluated peak locations ##\n",
+    "hist = hist_data['hist'][1,:,1, 1]\n",
+    "prev, shapes = get_starting_parameters(x, hist, peak_range, n_peaks=n_peaks_fit)\n",
     "\n",
-    "The following plot shows the evaluated peak locations for each peak. Peak ids increase downward:\n"
+    "if fit_range == [0, 0]:\n",
+    "    frange = (prev[f'g0mean']-2*prev[f'g0sigma'],\n",
+    "              prev[f'g3mean'] + prev[f'g3sigma'])\n",
+    "else:\n",
+    "    frange = fit_range\n",
+    "\n",
+    "set_par_limits(prev, peak_range, peak_norm_range,\n",
+    "               peak_width_range, n_peaks=n_peaks_fit)\n",
+    "minuit = fit_n_peaks(x, hist, prev, frange,\n",
+    "                     do_minos=True, n_peaks=n_peaks_fit,\n",
+    "                     fix_d01=fix_peaks)\n",
+    "\n",
+    "\n",
+    "res = minuit.fitarg\n",
+    "err = minuit.errors\n",
+    "p = minuit.args\n",
+    "ya = np.arange(0,1e4)\n",
+    "y = gaussian_sum(x,n_peaks_fit, *p)\n",
+    "peak_colors = ['g', 'y', 'b', 'orange']\n",
+    "\n",
+    "d = [{'x': x,\n",
+    "      'y': hist.astype(np.float64),\n",
+    "      'y_err': np.sqrt(hist),\n",
+    "      'drawstyle': 'bars',\n",
+    "      'errorstyle': 'bars',\n",
+    "      'ecolor': 'navy',\n",
+    "      'errorcoarsing': 3,\n",
+    "      'label': 'X-ray Data'\n",
+    "     },\n",
+    "     {'x': x,\n",
+    "      'y': y,\n",
+    "      'y2': (hist-y)/np.sqrt(hist),\n",
+    "      'color': 'red',\n",
+    "      'drawstyle':'line',\n",
+    "      'drawstyle2': 'steps-mid',\n",
+    "      'label': 'Fit'\n",
+    "     },\n",
+    "    ]\n",
+    "\n",
+    "for i in range(n_peaks_fit):\n",
+    "    d.append({'x': x,\n",
+    "             'y': gaussian_sum(x, 1, res[f'g{i}n'], res[f'g{i}mean'], res[f'g{i}sigma']),\n",
+    "             'drawstyle':'line',\n",
+    "             'color': peak_colors[i],\n",
+    "             })\n",
+    "    d.append({'x': np.full_like(ya, res[f'g{i}mean']),\n",
+    "              'y': ya,\n",
+    "              'drawstyle': 'line',\n",
+    "              'linestyle': 'dashed',\n",
+    "              'color': peak_colors[i],\n",
+    "              'label': f'peak {i} = {res[f\"g{i}mean\"]:0.1f} $ \\pm $ {err[f\"g{i}mean\"]:0.2f} ADU' })\n",
+    "\n",
+    "fig = plt.figure(figsize=(16,7))\n",
+    "ax = fig.add_subplot(121)\n",
+    "for i, shape in enumerate(shapes):\n",
+    "    idx = shape[3]\n",
+    "    plt.errorbar(x[idx], hist[idx], np.sqrt(hist[idx]),\n",
+    "         marker='+', ls=''\n",
+    "         )\n",
+    "    yg = gaussian(x[idx], *shape[:3])\n",
+    "    l = f'Peak {i}: {shape[1]:0.1f} $ \\pm $ {shape[2]:0.2f} ADU'\n",
+    "    plt.plot(x[idx], yg, label=l)\n",
+    "plt.grid(True)\n",
+    "plt.xlabel(\"Signal [ADU]\")\n",
+    "plt.ylabel(\"Counts\")\n",
+    "plt.legend(ncol=2)\n",
+    "\n",
+    "\n",
+    "ax2 = fig.add_subplot(122)\n",
+    "fig2 = xana.simplePlot(d, \n",
+    "                       use_axis=ax2,\n",
+    "                       x_label='Signal [ADU]', \n",
+    "                       y_label='Counts',\n",
+    "                       secondpanel=True, y_log=False, \n",
+    "                       x_range=(frange[0], frange[1]), \n",
+    "                       y_range=(1., np.max(hist)*1.6), \n",
+    "                       legend='top-left-frame-ncol2')\n",
+    "\n",
+    "print (minuit.get_fmin())\n",
+    "minuit.print_matrix()\n",
+    "print(minuit.get_param_states())"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T08:16:55.562017Z",
-     "start_time": "2019-09-13T08:16:54.264843Z"
-    },
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "from mpl_toolkits.axes_grid1 import AxesGrid\n",
-    "cell_to_preview = 4\n",
-    "for qm, data in peaks_g.items():\n",
-    "    print(\"The module shown is: {}\".format(qm))\n",
-    "    print(\"The cell shown is: {}\".format(cell_to_preview))\n",
-    "    print(\"Evaluated peaks: {}\".format(data.shape[-1]))\n",
-    "    fig = plt.figure(figsize=(15,15))\n",
-    "    grid = AxesGrid(fig, 111,  \n",
-    "                    nrows_ncols=(data.shape[-1], 1),\n",
-    "                    axes_pad=0.1,\n",
-    "                    share_all=True,\n",
-    "                    label_mode=\"L\",\n",
-    "                    cbar_location=\"right\",\n",
-    "                    cbar_mode=\"each\",\n",
-    "                    cbar_size=\"7%\",\n",
-    "                    cbar_pad=\"2%\")\n",
-    "                    \n",
-    "    for j in range(data.shape[-1]):\n",
-    "        d = data[...,cell_to_preview,j]\n",
-    "        d[~np.isfinite(d)] = 0\n",
-    "        \n",
-    "        im = grid[j].imshow(d, interpolation=\"nearest\", vmin=0.9*np.nanmedian(d), vmax=max(1.1*np.nanmedian(d), 50))\n",
-    "        cb = grid.cbar_axes[j].colorbar(im)\n",
-    "        cb.set_label_text(\"Peak location (ADU)\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
    "metadata": {},
+   "outputs": [],
    "source": [
-    "## Gain Slopes And Offsets ##\n",
-    "\n",
-    "The gain slopes and offsets are deduced by fitting a linear function $y = mx + b$ to the evaluated peaks. Gains are normalized to all pixels and all memory cells of a module by using the average peak locations a $x$ values. Thus slopes centered around 1 are to be expected.\n"
+    "# Allocate memory for fit results\n",
+    "fit_result = {}\n",
+    "keys = list(minuit.fitarg.keys())\n",
+    "keys = [x for x in keys if 'limit_' not in x and 'fix_' not in x]\n",
+    "keys += ['chi2_ndof', 'mask', 'gain']\n",
+    "for key in keys:\n",
+    "    dtype = 'f4'\n",
+    "    if key == 'mask':\n",
+    "        dtype = 'i4'\n",
+    "    fit_result[key] = sharedmem.empty([n_cells, n_pixels_x, n_pixels_y], dtype=dtype)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T08:16:56.879582Z",
-     "start_time": "2019-09-13T08:16:55.564572Z"
-    },
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "for i, r in enumerate(res_gain):\n",
-    "    ii = list(modules)[i]\n",
-    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
-    "    gain, pks, std, gfunc = r\n",
-    "    gains, errors, chisq, valid, max_dev, stats = gfunc    \n",
-    "    _, entries, stds, sow = gain\n",
-    "    \n",
-    "    fig = plt.figure(figsize=(15,8))\n",
-    "    ax = fig.add_subplot(211)\n",
-    "    im = ax.imshow(gains[...,0], interpolation=\"nearest\", vmin=0.85, vmax=1.15)\n",
-    "    cb = fig.colorbar(im)\n",
-    "    cb.set_label(\"Gain slope m\")\n",
-    "    fig.savefig(\"{}/gain_m_mod{}.png\".format(out_folder, qm))\n",
-    "    \n",
-    "    ax = fig.add_subplot(212)\n",
-    "    ax.hist(gains[...,0].flatten(), range=(0.5, 1.5), bins=100)\n",
-    "    ax.set_ylabel(\"Occurences\")\n",
-    "    ax.set_xlabel(\"Gain slope m\")\n",
-    "    ax.semilogy()"
+    "# Perform fitting\n",
+    "with Pool() as pool:\n",
+    "    const_out = pool.map(fit_batch, batches)"
    ]
   },
   {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {},
+   "outputs": [],
    "source": [
-    "The gain offsets b are expected to be centered around 0 and minimal, as data is already offset substracted."
+    "# Evaluate bad pixels\n",
+    "fit_result['gain'] = (fit_result['g1mean'] - fit_result['g0mean'])/photon_energy\n",
+    "\n",
+    "# Calculate histogram width and evaluate cut\n",
+    "h_sums = np.sum(hist_data['hist'], axis=1)\n",
+    "hist_norm = hist_data['hist'] / h_sums[:, None, :, :]\n",
+    "hist_mean = np.sum(hist_norm[:, :max_bins, ...] *\n",
+    "                   x[None, :, None, None], axis=1)\n",
+    "hist_sqr = (x[None, :, None, None] - hist_mean[:, None, ...])**2\n",
+    "hist_std = np.sqrt(np.sum(hist_norm[:, :max_bins, ...] * hist_sqr, axis=1))\n",
+    "\n",
+    "fit_result['mask'][hist_std<intensity_lim] |= BadPixelsFF.NO_ENTRY.value\n",
+    "\n",
+    "# Bad pixel on gain deviation\n",
+    "gains = np.copy(fit_result['gain'])\n",
+    "gains[fit_result['mask']>0] = np.nan\n",
+    "gain_mean = np.nanmean(gains, axis=(1,2))\n",
+    "\n",
+    "fit_result['mask'][fit_result['gain'] > gain_mean[:,None,None]*gain_lim[1] ] |=  BadPixelsFF.GAIN_DEVIATION.value\n",
+    "fit_result['mask'][fit_result['gain'] < gain_mean[:,None,None]*gain_lim[0] ] |=  BadPixelsFF.GAIN_DEVIATION.value\n"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T08:16:58.113910Z",
-     "start_time": "2019-09-13T08:16:56.881383Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "for i, r in enumerate(res_gain):\n",
-    "    ii = list(modules)[i]\n",
-    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
-    "    gain, pks, std, gfunc = r\n",
-    "    gains, errors, chisq, valid, max_dev, stats = gfunc\n",
-    "    _, entries, stds, sow = gain\n",
-    "    \n",
-    "    fig = plt.figure(figsize=(15,8))\n",
-    "    ax = fig.add_subplot(211)\n",
-    "    \n",
-    "    im = ax.imshow(gains[...,1], interpolation=\"nearest\", vmin=-25, vmax=25)\n",
-    "    cb = fig.colorbar(im)\n",
-    "    cb.set_label(\"Gain offset b\")\n",
-    "    fig.savefig(\"{}/gain_b_mod{}.png\".format(out_folder, qm))\n",
-    "    \n",
-    "    ax = fig.add_subplot(212)\n",
-    "    ax.hist(gains[...,1].flatten(), range=(-25, 25), bins=100)\n",
-    "    ax.set_ylabel(\"Occurences\")\n",
-    "    ax.set_xlabel(\"Gain offset b\")\n",
-    "    ax.semilogy()"
+    "# Save fit results\n",
+    "os.makedirs(out_folder, exist_ok=True)\n",
+    "out_name = f'{out_folder}/fits_m{module:02d}.h5'\n",
+    "print(f'Save to file: {out_name}')\n",
+    "save_dict_to_hdf5({'data': fit_result}, out_name)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Bad Pixels ##\n",
-    "\n",
-    "Bad pixels are defined as any of the following criteria: \n",
-    "\n",
-    "* the gain evaluation did not converge, or location of noise peak deviates by more than than the deviation threshold from the median location.\n",
-    "* the number of entries for the noise peak in 0.\n",
-    "* the standard deviation of the number of entries is larger than 5 times the standard deviation for the ASIC the pixel is on."
+    "## Summary across cells ##"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T08:18:08.277881Z",
-     "start_time": "2019-09-13T08:18:08.272390Z"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "print(\"The deviation threshold is: {}\".format(deviation_threshold))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T08:18:10.840520Z",
-     "start_time": "2019-09-13T08:18:09.769451Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "for i, r in enumerate(res_gain):\n",
-    "    ii = list(modules)[i]\n",
-    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
-    "    mask_db = mask_g[qm]\n",
-    "    \n",
-    "    fig = plt.figure(figsize=(15,8))\n",
-    "    ax = fig.add_subplot(211)\n",
-    "    im = ax.imshow(np.log2(mask_db[...,cell_to_preview]), interpolation=\"nearest\", vmin=0, vmax=32)\n",
-    "    cb = fig.colorbar(im)\n",
-    "    cb.set_label(\"Mask value\")\n",
-    "    fig.savefig(\"{}/mask_mod{}.png\".format(out_folder, qm))\n",
-    "    \n",
-    "    ax = fig.add_subplot(212)\n",
-    "    ax.hist(np.log2(mask_db.flatten()), range=(0, 32), bins=32, normed=True)\n",
-    "    ax.set_ylabel(\"Occurences\")\n",
-    "    ax.set_xlabel(\"Mask value\")\n",
-    "    ax.semilogy()"
+    "labels = ['Noise peak [ADU]',\n",
+    "         'First photon peak [ADU]',\n",
+    "         f\"gain [ADU/keV], $\\gamma$={photon_energy} [keV]\",\n",
+    "         \"$\\chi^2$/nDOF\",\n",
+    "         'Fraction of bad pixels']\n",
+    "for i, key in enumerate(['g0mean', 'g1mean', 'gain', 'chi2_ndof', 'mask']):\n",
+    "\n",
+    "    fig = plt.figure(figsize=(20,5))\n",
+    "    ax = fig.add_subplot(121)\n",
+    "    data = fit_result[key]\n",
+    "    if key == 'mask':\n",
+    "        data = data>0\n",
+    "        vmin, vmax = [0, 1]\n",
+    "    else:\n",
+    "        vmin, vmax = get_range(data, 5)\n",
+    "    _ = heatmapPlot(np.mean(data, axis=0).T,\n",
+    "                    add_panels=False, cmap='viridis', use_axis=ax,\n",
+    "                    vmin=vmin, vmax=vmax, lut_label=labels[i] )\n",
+    " \n",
+    "    if key != 'mask':\n",
+    "        vmin, vmax = get_range(data, 7)\n",
+    "        ax1 = fig.add_subplot(122)\n",
+    "        _ = xana.histPlot(ax1,data.flatten(), \n",
+    "                      bins=45,range=[vmin, vmax],\n",
+    "                      log=True,color='red',histtype='stepfilled')\n",
+    "        plt.xlabel(labels[i])\n",
+    "        plt.ylabel(\"Counts\")   "
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T08:18:14.349244Z",
-     "start_time": "2019-09-13T08:18:11.849053Z"
-    }
-   },
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
-    "cols = {BadPixels.FF_GAIN_EVAL_ERROR.value: (BadPixels.FF_GAIN_EVAL_ERROR.name, '#FF000080'),\n",
-    "        BadPixels.FF_GAIN_DEVIATION.value: (BadPixels.FF_GAIN_DEVIATION.name, '#0000FF80'),\n",
-    "        BadPixels.FF_NO_ENTRIES.value: (BadPixels.FF_NO_ENTRIES.name, '#00FF0080'),\n",
-    "        BadPixels.FF_GAIN_EVAL_ERROR.value |\n",
-    "        BadPixels.FF_GAIN_DEVIATION.value: ('EVAL+DEV', '#DD00DD80'),\n",
-    "        BadPixels.FF_GAIN_EVAL_ERROR.value |\n",
-    "        BadPixels.FF_NO_ENTRIES.value: ('EVAL+NO_ENTRY', '#00DDDD80'),\n",
-    "        BadPixels.FF_GAIN_DEVIATION.value |\n",
-    "        BadPixels.FF_NO_ENTRIES.value: ('DEV+NO_ENTRY', '#DDDD0080'),\n",
-    "       }\n",
-    "\n",
-    "rebin = 32 if not high_res_badpix_3d else 2\n",
-    "\n",
-    "gain = 0\n",
-    "for mod, data in mask_g.items():\n",
-    "    plot_badpix_3d(data, cols, title=mod, rebin_fac=rebin)"
+    "## histograms of fit parameters ##"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-07-23T18:02:58.599579Z",
-     "start_time": "2019-07-23T18:02:58.316311Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "if local_output:\n",
-    "    ofile = \"{}/agipd_gain_store_{}_modules_{}.h5\".format(out_folder,\n",
-    "                                                          \"_\".join([str(r) for r in runs]),\n",
-    "                                                          \"_\".join([str(m) for m in modules]))\n",
-    "    store_file = h5py.File(ofile, \"w\")\n",
-    "    for i, r in enumerate(res_gain):\n",
-    "        ii = list(modules)[i]\n",
-    "        qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
-    "        gain, pks, std, gfunc = r\n",
-    "        gains, errors, chisq, valid, max_dev, stats = gfunc\n",
-    "        gainmap, entires, stds, sow = gain\n",
-    "        store_file[\"/{}/Gain/0/data\".format(qm)] = gains[...,0]\n",
-    "        store_file[\"/{}/GainOffset/0/data\".format(qm)] = gains[...,1]\n",
-    "        store_file[\"/{}/Flat/0/data\".format(qm)] = flatsff[qm]\n",
-    "        store_file[\"/{}/Entries/0/data\".format(qm)] = entires\n",
-    "        store_file[\"/{}/BadPixels/0/data\".format(qm)] = mask_g[qm]  \n",
-    "    store_file.close()"
+    "fig = plt.figure(figsize=(10, 5))\n",
+    "ax0 = fig.add_subplot(111)\n",
+    "a = ax0.hist(hist_std.flatten(), bins=100, range=(0,100) )\n",
+    "ax0.plot([intensity_lim, intensity_lim], [0, np.nanmax(a[0])], linewidth=1.5, color='red' ) \n",
+    "ax0.set_xlabel('Histogram width [ADU]', fontsize=14)\n",
+    "ax0.set_ylabel('Number of histograms', fontsize=14)\n",
+    "ax0.set_title(f'{hist_std[hist_std<intensity_lim].shape[0]} histograms below threshold in {intensity_lim} ADU',\n",
+    "              fontsize=14, fontweight='bold')\n",
+    "ax0.grid()\n",
+    "plt.yscale('log')"
    ]
   },
   {
@@ -1128,446 +496,124 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
-    "file_loc = proposal + ' ' + ' '.join(list(map(str,runs)))"
+    "num_bins = int(frange[1] - frange[0])\n",
+    "fig = plt.figure(figsize=(16,5))\n",
+    "ax = fig.add_subplot(131)\n",
+    "_ = xana.histPlot(ax,fit_result['g1mean'].flatten(), \n",
+    "                  bins= num_bins,range=[frange[0] ,frange[1]],\n",
+    "                  log=True,color='red',histtype='stepfilled')  \n",
+    "plt.xlabel(\"g1 mean [ADU]\")\n",
+    "\n",
+    "ax1 = fig.add_subplot(132)\n",
+    "_ = xana.histPlot(ax1,fit_result['g2mean'].flatten(), \n",
+    "#                  bins=45,range=[80 ,140],\n",
+    "                  bins= num_bins,range=[frange[0] ,frange[1]],\n",
+    "                  log=True,color='red',histtype='stepfilled')  \n",
+    "plt.xlabel(\"g2 mean [ADU]\")\n",
+    "\n",
+    "ax2 = fig.add_subplot(133)\n",
+    "_ = xana.histPlot(ax2,fit_result['g3mean'].flatten(), \n",
+    "                  bins= num_bins,range=[frange[0] ,frange[1]],\n",
+    "                  log=True,color='red',histtype='stepfilled')  \n",
+    "_ = plt.xlabel(\"g3 mean [ADU]\")"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-12T14:49:28.355922Z",
-     "start_time": "2019-09-12T14:49:28.340063Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "if db_output:\n",
-    "    for i, r in enumerate(res_gain):\n",
-    "        ii = list(modules)[i]\n",
-    "        qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
-    "        \n",
-    "        gain, pks, std, gfunc = r\n",
-    "        gains, errors, chisq, valid, max_dev, stats = gfunc\n",
-    "        gainmap, entires, stds, sow = gain\n",
-    "        \n",
-    "        det = getattr(Detectors, dinstance)\n",
-    "        device = getattr(det, qm)\n",
-    "        # gains related\n",
-    "        metadata = ConstantMetaData()\n",
-    "        cgain = Constants.AGIPD.SlopesFF()\n",
-    "        cgain.data = gains\n",
-    "        metadata.calibration_constant = cgain\n",
-    "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Illuminated.AGIPD(memory_cells, bias_voltage, 9.2,\n",
-    "                                                 pixels_x=512, pixels_y=128, beam_energy=None,\n",
-    "                                                 acquisition_rate=acqrate, gain_setting=gain_setting)\n",
-    "        \n",
-    "        metadata.detector_condition = condition\n",
-    "\n",
-    "        # specify the a version for this constant\n",
-    "        if creation_time is None:\n",
-    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
-    "        else:\n",
-    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
-    "\n",
-    "        metadata.calibration_constant_version.raw_data_location = file_loc\n",
-    "        metadata.send(cal_db_interface, timeout=300000)\n",
-    "        \n",
-    "        \n",
-    "        # bad pixels        \n",
-    "        metadata = ConstantMetaData()\n",
-    "        bp = Constants.AGIPD.BadPixelsFF()\n",
-    "        bp.data = mask_g[qm]  \n",
-    "        metadata.calibration_constant = bp\n",
-    "\n",
-    "        # set the operating condition\n",
-    "        condition = Conditions.Illuminated.AGIPD(memory_cells, bias_voltage, 9.2,\n",
-    "                                                 pixels_x=512, pixels_y=128, beam_energy=None,\n",
-    "                                                 acquisition_rate=acqrate, gain_setting=gain_setting)\n",
-    "        \n",
-    "        metadata.detector_condition = condition\n",
-    "\n",
-    "        # specify the a version for this constant\n",
-    "        if creation_time is None:\n",
-    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
-    "        else:\n",
-    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
-    "        metadata.calibration_constant_version.raw_data_location = file_loc\n",
-    "        metadata.send(cal_db_interface, timeout=300000)"
+    "fig = plt.figure(figsize=(16,5))\n",
+    "ax = fig.add_subplot(131)\n",
+    "_ = xana.histPlot(ax,fit_result['g0sigma'].flatten(), \n",
+    "                  bins=45,range=[0 ,50],\n",
+    "                  log=True,color='red',histtype='stepfilled')  \n",
+    "plt.xlabel(\"g1 sigma [ADU]\")\n",
+    "\n",
+    "ax1 = fig.add_subplot(132)\n",
+    "_ = xana.histPlot(ax1,fit_result['g1sigma'].flatten(), \n",
+    "                  bins=45,range=[0 ,50],\n",
+    "                  log=True,color='red',histtype='stepfilled')  \n",
+    "plt.xlabel(\"g2 sigma [ADU]\")\n",
+    "\n",
+    "ax2 = fig.add_subplot(133)\n",
+    "_ = xana.histPlot(ax2,fit_result['g2sigma'].flatten(), \n",
+    "                  bins=45,range=[0 ,50],\n",
+    "                  log=True,color='red',histtype='stepfilled')  \n",
+    "_ = plt.xlabel(\"g3 sigma [ADU]\")"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Sanity check ##\n",
+    "## Summary across pixels ##\n",
     "\n",
-    "Finally, we perform a correction of the data used to derive the gain constants with said constants. We expected an improvement in peak FWHM, if constants have been deduced correctly."
+    "Mean and median values are calculated across all pixels for each memory cell. "
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T09:23:54.999086Z",
-     "start_time": "2019-09-13T09:16:57.293565Z"
-    },
     "scrolled": false
    },
    "outputs": [],
    "source": [
-    "def hist_single_module(fbase, runs, sequences, il_mode, profile, limit_trains, memory_cells, rawversion, instrument, inp):\n",
-    "    channel, offset, thresholds, relgain, noise, pc = inp\n",
-    "    gain, pks, std, gfunc = relgain\n",
-    "    gains, errors, chisq, valid, max_dev, stats = gfunc\n",
-    "    \n",
-    "    import XFELDetAna.xfelpycaltools as xcal\n",
-    "    import numpy as np\n",
-    "    import h5py\n",
-    "    import copy\n",
-    "    from XFELDetAna.util import env\n",
-    "    env.iprofile = profile\n",
-    "    sensor_size = [128, 512]\n",
-    "    \n",
-    "    block_size = sensor_size\n",
-    "    \n",
-    "    \n",
-    "    def baseline_correct_via_noise(d, noise, g, baseline_corr_noise_threshold=1000):\n",
-    "        \"\"\" Correct baseline shifts by evaluating position of the noise peak\n",
-    "\n",
-    "        :param: d: the data to correct, should be a single image\n",
-    "        :param noise: the mean noise for the cell id of `d`\n",
-    "        :param g: gain array matching `d` array\n",
-    "\n",
-    "        Correction is done by identifying the left-most (significant) peak\n",
-    "        in the histogram of `d` and shifting it to be centered on 0.\n",
-    "        This is done via a continous wavelet transform (CWT), using a Ricker\n",
-    "        wavelet.\n",
-    "\n",
-    "        Only high gain data is evaulated, and data larger than 50 ADU,\n",
-    "        equivalent of roughly a 9 keV photon is ignored.\n",
-    "\n",
-    "        Two passes are executed: the first shift is accurate to 10 ADU and\n",
-    "        will catch baseline shifts smaller then -2000 ADU. CWT is evaluated\n",
-    "        for peaks of widths one, three and five time the noise.\n",
-    "        The baseline is then shifted by the position of the summed CWT maximum.\n",
-    "\n",
-    "        In a second pass histogram is evaluated within a range of\n",
-    "        +- 5*noise of the initial shift value, for peak of width noise.\n",
-    "\n",
-    "        Baseline shifts larger than the maximum threshold or positive shifts\n",
-    "        larger 10 are discarded, and the original data is returned, otherwise\n",
-    "        the shift corrected data is returned.\n",
-    "\n",
-    "        \"\"\"\n",
-    "        import copy\n",
-    "        from scipy.signal import cwt, ricker\n",
-    "        # we work on a copy to be able to filter values by setting them to\n",
-    "        # np.nan\n",
-    "        dd = copy.copy(d)\n",
-    "        dd[g != 0] = np.nan  # only high gain data\n",
-    "        dd[dd > 50] = np.nan  # only noise data\n",
-    "        h, e = np.histogram(dd, bins=210, range=(-2000, 100))\n",
-    "        c = (e[1:] + e[:-1]) / 2\n",
-    "\n",
-    "        try:\n",
-    "            cwtmatr = cwt(h, ricker, [noise, 3. * noise, 5. * noise])\n",
-    "        except:\n",
-    "            return d\n",
-    "        cwtall = np.sum(cwtmatr, axis=0)\n",
-    "        marg = np.argmax(cwtall)\n",
-    "        pc = c[marg]\n",
-    "\n",
-    "        high_res_range = (dd > pc - 5 * noise) & (dd < pc + 5 * noise)\n",
-    "        dd[~high_res_range] = np.nan\n",
-    "        h, e = np.histogram(dd, bins=200,\n",
-    "                            range=(pc - 5 * noise, pc + 5 * noise))\n",
-    "        c = (e[1:] + e[:-1]) / 2\n",
-    "        try:\n",
-    "            cwtmatr = cwt(h, ricker, [noise, ])\n",
-    "        except:\n",
-    "            return d\n",
-    "        marg = np.argmax(cwtmatr)\n",
-    "        pc = c[marg]\n",
-    "        # too large shift to be easily decernable via noise\n",
-    "        if pc > 10 or pc < -baseline_corr_noise_threshold:\n",
-    "            return d\n",
-    "        return d - pc\n",
-    "    \n",
-    "    def read_fun(filename, channel):\n",
-    "        \n",
-    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
-    "        if rawversion == 2:\n",
-    "            count = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/count\".format(instrument, channel)])\n",
-    "            first = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/first\".format(instrument, channel)])\n",
-    "            last_index = int(first[count != 0][-1]+count[count != 0][-1])\n",
-    "            first_index = int(first[count != 0][0])\n",
-    "        else:\n",
-    "            status = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(instrument, channel)])\n",
-    "            if np.count_nonzero(status != 0) == 0:\n",
-    "                return\n",
-    "            last = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(instrument, channel)])\n",
-    "            last_index = int(last[status != 0][-1])\n",
-    "            first_index = int(last[status != 0][0])\n",
-    "        \n",
-    "        if limit_trains is not None:\n",
-    "            last_index = min(limit_trains*memory_cells+first_index, last_index)\n",
-    "        \n",
-    "        im = np.array(infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(instrument, channel)][first_index:last_index,...])    \n",
-    "        carr = infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(instrument, channel)][first_index:last_index]\n",
-    "        cells = np.squeeze(np.array(carr))\n",
-    "        infile.close()\n",
+    "def plot_error_band(key, x, ax):\n",
+    "    \n",
+    "    cdata = np.copy(fit_result[key])\n",
+    "    cdata[fit_result['mask']>0] = np.nan\n",
+    "    \n",
+    "    mean = np.nanmean(cdata, axis=(1,2))\n",
+    "    median = np.nanmedian(cdata, axis=(1,2))\n",
+    "    std = np.nanstd(cdata, axis=(1,2))\n",
+    "    mad = np.nanmedian(np.abs(cdata - median[:,None,None]), axis=(1,2))\n",
+    "\n",
+    "    ax0 = fig.add_subplot(111)\n",
+    "    ax0.plot(x, mean, 'k', color='#3F7F4C', label=\" mean value \")\n",
+    "    ax0.plot(x, median, 'o', color='red', label=\" median value \")\n",
+    "    ax0.fill_between(x, mean-std, mean+std,\n",
+    "                     alpha=0.6, edgecolor='#3F7F4C', facecolor='#7EFF99',\n",
+    "                     linewidth=1, linestyle='dashdot', antialiased=True,\n",
+    "                     label=\" mean value $ \\pm $ std \")\n",
+    "\n",
+    "    ax0.fill_between(x, median-mad, median+mad,\n",
+    "                     alpha=0.3, edgecolor='red', facecolor='red',\n",
+    "                     linewidth=1, linestyle='dashdot', antialiased=True,\n",
+    "                     label=\" median value $ \\pm $ mad \")\n",
+    "    \n",
+    "    if f'error_{key}' in fit_result:\n",
+    "        cerr = np.copy(fit_result[f'error_{key}'])\n",
+    "        cerr[fit_result['mask']>0] = np.nan\n",
     "        \n",
+    "        meanerr = np.nanmean(cerr, axis=(1,2))\n",
+    "        ax0.fill_between(x, mean-meanerr, mean+meanerr,\n",
+    "                 alpha=0.6, edgecolor='#089FFF', facecolor='#089FFF',\n",
+    "                 linewidth=1, linestyle='dashdot', antialiased=True,\n",
+    "                 label=\" mean fit error \")\n",
     "    \n",
-    "        if il_mode:\n",
-    "            ga = im[1::2, 0, ...]\n",
-    "            im = im[0::2, 0, ...].astype(np.float32)\n",
-    "        else:\n",
-    "            ga = im[:, 1, ...]\n",
-    "            im = im[:, 0, ...].astype(np.float32)\n",
     "\n",
-    "        im = np.rollaxis(im, 2)\n",
-    "        im = np.rollaxis(im, 2, 1)\n",
+    "x = np.linspace(*cell_range, n_cells)\n",
     "\n",
-    "        ga = np.rollaxis(ga, 2)\n",
-    "        ga = np.rollaxis(ga, 2, 1)\n",
-    "        return im, ga, cells\n",
-    "    \n",
-    "    offset_cor = xcal.OffsetCorrection(sensor_size, offset, nCells=memory_cells, blockSize=block_size, gains=[0,1,2])\n",
-    "    offset_cor.debug()\n",
-    "    \n",
-    "    hist_calc = xcal.HistogramCalculator(sensor_size, bins=2000, range=(0, 2000), blockSize=block_size)\n",
-    "    hist_calc.debug()\n",
-    "\n",
-    "    hist_calc_uncorr = xcal.HistogramCalculator(sensor_size, bins=2000, range=(0, 2000), blockSize=block_size)\n",
-    "    hist_calc_uncorr.debug()\n",
-    "    \n",
-    "    \n",
-    "    for run in runs:\n",
-    "        for seq in sequences[:2]:\n",
-    "            \n",
-    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
-    "            \n",
-    "            d, ga, c = read_fun(fname, channel)\n",
-    "            \n",
-    "            # we need to do proper gain thresholding\n",
-    "            vidx = (c < offset.shape[2]) & (c < thresholds.shape[2])\n",
-    "            c = c[vidx]\n",
-    "            d = d[:,:,vidx]\n",
-    "            ga = ga[:,:,vidx]\n",
+    "for i, key in enumerate(['g0mean', 'g1mean', 'gain', 'chi2_ndof']):\n",
     "\n",
-    "            g = np.zeros(ga.shape, np.uint8)\n",
-    "            g[...] = 2\n",
-    "            \n",
-    "            g[ga < thresholds[...,c,1]] = 1\n",
-    "            g[ga < thresholds[...,c,0]] = 0\n",
-    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
-    "            for i in range(d.shape[2]):\n",
-    "                mn_noise = np.nanmean(noise[...,c[i]])\n",
-    "                d[...,i] = baseline_correct_via_noise(d[...,i],\n",
-    "                                                      mn_noise,\n",
-    "                                                      g[..., i])\n",
-    "            \n",
-    "            d *= np.moveaxis(pc[c,...], 0, 2)\n",
-    "            \n",
-    "            hist_calc_uncorr.fill(d)\n",
-    "            d = (d-gains[..., 1][...,None])/gains[..., 0][...,None]\n",
-    "            #d = d/gains[..., 0][...,None]\n",
-    "            hist_calc.fill(d)    \n",
-    "    \n",
-    "    h, e, c, _ = hist_calc.get()\n",
-    "    hu  = hist_calc_uncorr.get()\n",
-    "    return h, e, c, hu[0]\n",
+    "    fig = plt.figure(figsize=(10, 5))\n",
+    "    ax0 = fig.add_subplot(111)\n",
+    "    plot_error_band(key, x, ax0)\n",
     "\n",
-    "inp = []\n",
-    "idx = 0\n",
-    "for i in modules:\n",
-    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
-    "    \n",
-    "    inp.append((i, offset_g[qm], thresholds_g[qm], res_gain[idx], noise_g[qm][...,0], pc_g[qm][0,...]))\n",
-    "    idx += 1\n",
-    "    \n",
-    "p = partial(hist_single_module, fbase, runs, sequences, IL_MODE, profile, limit_trains, memory_cells, rawversion, instrument)\n",
-    "#res = view.map_sync(p, inp)\n",
-    "res = list(map(p, inp))\n"
+    "    ax0.set_xlabel('Memory Cell ID', fontsize=14)\n",
+    "    ax0.set_ylabel(labels[i], fontsize=14)\n",
+    "    ax0.grid()\n",
+    "    _ = ax0.legend()\n"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-09-13T09:23:56.023378Z",
-     "start_time": "2019-09-13T09:23:55.002226Z"
-    }
-   },
-   "outputs": [],
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
-    "from iminuit import Minuit\n",
-    "from iminuit.util import make_func_code, describe\n",
-    "from IPython.display import HTML, display\n",
-    "import tabulate\n",
-    "\n",
-    "# fitting\n",
-    "par_ests = {}\n",
-    "par_ests[\"mu0\"] = 0\n",
-    "par_ests[\"mu1\"] = 50\n",
-    "par_ests[\"mu2\"] = 100\n",
-    "par_ests[\"limit_mu0\"] = [-5, 5]\n",
-    "par_ests[\"limit_mu1\"] = [35, 65]\n",
-    "par_ests[\"limit_mu2\"] = [100, 150]\n",
-    "par_ests[\"s0\"] = 10\n",
-    "par_ests[\"s1\"] = 10\n",
-    "par_ests[\"s2\"] = 10\n",
-    "par_ests[\"limit_A0\"] = [1e5, 1e12]\n",
-    "par_ests[\"limit_A1\"] = [1e4, 1e10]\n",
-    "par_ests[\"limit_A2\"] = [1e3, 1e10]\n",
-    "\n",
-    "\n",
-    "par_ests[\"throw_nan\"] = False\n",
-    "par_ests[\"pedantic\"] = False\n",
-    "par_ests[\"print_level\"] = 1\n",
-    "\n",
-    "def gaussian3(x, mu0, s0, A0, mu1, s1, A1, mu2, s2, A2):\n",
-    "    return (A0/np.sqrt(2*np.pi*s0**2)*np.exp(-0.5*((x-mu0)/s0)**2) +\n",
-    "            A1/np.sqrt(2*np.pi*s1**2)*np.exp(-0.5*((x-mu1)/s1)**2) +\n",
-    "            A2/np.sqrt(2*np.pi*s2**2)*np.exp(-0.5*((x-mu2)/s2)**2))\n",
-    "\n",
-    "\n",
-    "f_sig = describe(gaussian3)[1:]\n",
-    "\n",
-    "class _Chi2Functor:\n",
-    "    def __init__(self, f, x, y, err):\n",
-    "        self.f = f\n",
-    "        self.x = x\n",
-    "        self.y = y\n",
-    "        self.err = err\n",
-    "        f_sig = describe(f)\n",
-    "        # this is how you fake function\n",
-    "        # signature dynamically\n",
-    "        self.func_code = make_func_code(\n",
-    "            f_sig[1:])  # docking off independent variable\n",
-    "        self.func_defaults = None  # this keeps numpy.vectorize happy\n",
-    "\n",
-    "    def __call__(self, *arg):\n",
-    "        # notice that it accept variable length\n",
-    "        # positional arguments\n",
-    "        # chi2 = sum((y-self.f(x,*arg))**2 for x,y in zip(self.x,self.y))\n",
-    "        return np.sum(((self.f(self.x, *arg) - self.y) ** 2) / self.err)\n",
-    "    \n",
-    "    \n",
-    "d = []\n",
-    "y_range_max = 0\n",
-    "table = []\n",
-    "headers = ['Module',\n",
-    "           'FWHM (cor.) [ADU]', 'Separation (cor.) [$\\sigma$]',\n",
-    "           'FWHM (uncor.) [ADU]', 'Separation (uncor.) [$\\sigma$]',           \n",
-    "           'Improvement'\n",
-    "          ]\n",
-    "for i, r in enumerate(res):\n",
-    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
-    "    row = []\n",
-    "    row.append(qm)\n",
-    "    \n",
-    "    h, e, c, hu = r\n",
-    "        \n",
-    "    \n",
-    "    d.append({\n",
-    "            'x': c, \n",
-    "            'y': h,\n",
-    "            'drawstyle': 'steps-mid',\n",
-    "            'label': '{}: corrected'.format(qm),\n",
-    "            'marker': '.',\n",
-    "            'color': 'blue',\n",
-    "        })\n",
-    "    \n",
-    "    idx = (h > 0) & np.isfinite(h)\n",
-    "    h_non_zero = h[idx]\n",
-    "    c_non_zero = c[idx]\n",
-    "    par_ests[\"A0\"] = np.float(h[np.argmin(abs(c-0))])\n",
-    "    par_ests[\"A1\"] = np.float(h[np.argmin(abs(c-50))])\n",
-    "    par_ests[\"A2\"] = np.float(h[np.argmin(abs(c-100))])\n",
-    "    wrapped = _Chi2Functor(gaussian3, c_non_zero, h_non_zero,\n",
-    "                                     np.sqrt(h_non_zero))\n",
-    "    \n",
-    "    m = Minuit(wrapped, **par_ests)\n",
-    "    fmin = m.migrad()\n",
-    "    \n",
-    "    xt = np.arange(0, 200)\n",
-    "    \n",
-    "    yt = gaussian3(xt, m.values['mu0'],  m.values['s0'],  m.values['A0'],\n",
-    "                   m.values['mu1'],  m.values['s1'],  m.values['A1'],\n",
-    "                   m.values['mu2'],  m.values['s2'],  m.values['A2'])\n",
-    "\n",
-    "    d.append({\n",
-    "            'x': xt, \n",
-    "            'y': yt,\n",
-    "            'label': '{}: corrected (fit)'.format(qm),\n",
-    "            'color': 'green',\n",
-    "            'drawstyle': 'line',\n",
-    "            'linewidth': 2\n",
-    "        })\n",
-    "    \n",
-    "    \n",
-    "    d.append({\n",
-    "            'x': c, \n",
-    "            'y': hu,\n",
-    "            'drawstyle': 'steps-mid',\n",
-    "            'label': '{}: uncorrected'.format(qm),\n",
-    "            'marker': '.',\n",
-    "            'color': 'grey',\n",
-    "            'alpha': 0.5\n",
-    "        })\n",
-    "    \n",
-    "    row += [m.values['s1']*2.35, (m.values['mu1']-m.values['mu0'])/m.values['s1']]\n",
-    "    \n",
-    "    \n",
-    "    idx = (hu > 0) & np.isfinite(hu)\n",
-    "    h_non_zero = hu[idx]\n",
-    "    c_non_zero = c[idx]\n",
-    "    wrapped = _Chi2Functor(gaussian3, c_non_zero, h_non_zero,\n",
-    "                                     np.sqrt(h_non_zero))\n",
-    "    \n",
-    "    m = Minuit(wrapped, **par_ests)\n",
-    "    fmin = m.migrad()\n",
-    "    \n",
-    "    xt = np.arange(0, 200)\n",
-    "    \n",
-    "    yt = gaussian3(xt, m.values['mu0'],  m.values['s0'],  m.values['A0'],\n",
-    "                   m.values['mu1'],  m.values['s1'],  m.values['A1'],\n",
-    "                   m.values['mu2'],  m.values['s2'],  m.values['A2'])\n",
-    "\n",
-    "    d.append({\n",
-    "            'x': xt, \n",
-    "            'y': yt,\n",
-    "            'label': '{}: uncorrected (fit)'.format(qm),\n",
-    "            'color': 'red',\n",
-    "            'linewidth': 2\n",
-    "        })\n",
-    "    \n",
-    "    row += [m.values['s1']*2.35, (m.values['mu1']-m.values['mu0'])/m.values['s1']]\n",
-    "    \n",
-    "    row.append(\"{:0.2f} %\".format(100*(row[3]/row[1]-1)))\n",
-    "    \n",
-    "    y_range_max = max(y_range_max, np.max(h[c>25])*1.5)\n",
-    "    \n",
-    "    # output table\n",
-    "    table.append(row)\n",
-    "    \n",
-    "fig = xana.simplePlot(d, y_log=False, figsize=\"2col\",\n",
-    "                      aspect=2,\n",
-    "                      x_range=(0, 200),\n",
-    "                      legend='top-right-frame',\n",
-    "                      y_range=(0, y_range_max),\n",
-    "                      x_label='Energy (ADU)',\n",
-    "                      y_label='Counts')\n",
-    "\n",
-    "display(HTML(tabulate.tabulate(table, tablefmt='html', headers=headers,\n",
-    "                               numalign=\"right\", floatfmt=\"0.2f\")))"
+    "## Cut flow ##"
    ]
   },
   {
@@ -1575,7 +621,68 @@
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "fig = plt.figure(figsize=(10, 5))\n",
+    "ax = fig.add_subplot(111)\n",
+    "\n",
+    "n_bars = 8\n",
+    "x = np.arange(n_bars)\n",
+    "width = 0.3\n",
+    "\n",
+    "msk = fit_result['mask']\n",
+    "n_fits = np.prod(msk.shape)\n",
+    "y = [any_in(msk, BadPixelsFF.FIT_FAILED.value),\n",
+    "     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value),\n",
+    "     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |\n",
+    "           BadPixelsFF.CHI2_THRESHOLD.value),\n",
+    "     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |\n",
+    "           BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value),\n",
+    "     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |\n",
+    "           BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |\n",
+    "           BadPixelsFF.NOISE_PEAK_THRESHOLD.value),\n",
+    "     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |\n",
+    "           BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |\n",
+    "           BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value),\n",
+    "     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |\n",
+    "           BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |\n",
+    "           BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value\n",
+    "           | BadPixelsFF.NO_ENTRY.value),\n",
+    "     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |\n",
+    "           BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |\n",
+    "           BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value\n",
+    "           | BadPixelsFF.NO_ENTRY.value| BadPixelsFF.GAIN_DEVIATION.value)\n",
+    "    ]\n",
+    "\n",
+    "y2 = [any_in(msk, BadPixelsFF.FIT_FAILED.value),\n",
+    "     any_in(msk, BadPixelsFF.ACCURATE_COVAR.value),\n",
+    "     any_in(msk, BadPixelsFF.CHI2_THRESHOLD.value),\n",
+    "     any_in(msk, BadPixelsFF.GAIN_THRESHOLD.value),\n",
+    "     any_in(msk, BadPixelsFF.NOISE_PEAK_THRESHOLD.value),\n",
+    "     any_in(msk, BadPixelsFF.PEAK_WIDTH_THRESHOLD.value),\n",
+    "     any_in(msk, BadPixelsFF.NO_ENTRY.value),\n",
+    "     any_in(msk, BadPixelsFF.GAIN_DEVIATION.value)\n",
+    "    ]\n",
+    "\n",
+    "y = (1 - np.sum(y, axis=(1,2,3))/n_fits)*100\n",
+    "y2 = (1 - np.sum(y2, axis=(1,2,3))/n_fits)*100\n",
+    "\n",
+    "labels = ['Fit failes',\n",
+    "         'Accurate covar',\n",
+    "         'Chi2/nDOF',\n",
+    "         'Gain',\n",
+    "         'Noise peak',\n",
+    "         'Peak width',\n",
+    "         'No Entry',\n",
+    "         'Gain deviation']\n",
+    "\n",
+    "ax.bar(x, y2, width, label='Only this cut')\n",
+    "ax.bar(x, y, width, label='Cut flow')\n",
+    "plt.xticks(x, labels, rotation=90)\n",
+    "plt.ylim(y[5]-0.5,100)\n",
+    "plt.grid(True)\n",
+    "plt.legend()\n",
+    "plt.show()"
+   ]
   }
  ],
  "metadata": {
@@ -1594,7 +701,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.7"
+   "version": "3.7.6"
   }
  },
  "nbformat": 4,
diff --git a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..da71858d2a7021f315e5692bb4c4a057574a5199
--- /dev/null
+++ b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb
@@ -0,0 +1,617 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Gain Characterization Summary #\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "in_folder = \"\" # in this notebook, in_folder is not used as the data source is in the destination folder\n",
+    "out_folder = \"/gpfs/exfel/exp/SPB/202030/p900138/scratch/karnem/r0203_r0204_v02\"  # the folder to output to, required\n",
+    "hist_file_template = \"hists_m{:02d}_sum.h5\"\n",
+    "modules = [10] # modules to correct, set to -1 for all, range allowed\n",
+    "\n",
+    "image_data_path = \"\"\n",
+    "run = 449 # runs of image data used to create histograms\n",
+    "\n",
+    "karabo_id = \"MID_DET_AGIPD1M-1\" # karabo karabo_id\n",
+    "karabo_da = ['-1']  # a list of data aggregators names, Default [-1] for selecting all data aggregators\n",
+    "receiver_id = \"{}CH0\" # inset for receiver devices\n",
+    "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data\n",
+    "h5path = 'INSTRUMENT/{}/DET/{}:xtdf/' # path in the HDF5 file to images\n",
+    "h5path_idx = 'INDEX/{}/DET/{}:xtdf/' # path in the HDF5 file to images\n",
+    "h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP' # path to control information\n",
+    "karabo_id_control = \"MID_IRU_AGIPD1M1\" # karabo-id for control device\n",
+    "karabo_da_control = 'AGIPD1MCTRL00' # karabo DA for control infromation\n",
+    "\n",
+    "use_dir_creation_date = True # use the creation data of the input dir for database queries\n",
+    "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
+    "cal_db_timeout = 30000 # in milli seconds\n",
+    "local_output = True # output constants locally\n",
+    "db_output = False # output constants to database\n",
+    "\n",
+    "# Fit parameters\n",
+    "peak_range = [-30,30,35,65,80,130,145,200] # where to look for the peaks, [a0, b0, a1, b1, ...] exactly 8 elements\n",
+    "peak_width_range = [0, 30, 0, 35, 0, 40, 0, 45] # fit limits on the peak widths, [a0, b0, a1, b1, ...] exactly 8 elements\n",
+    "\n",
+    "# Bad-pixel thresholds\n",
+    "d0_lim = [10, 70] # hard limits for d0 value (distance between noise and first peak)\n",
+    "peak_width_lim = [0.97, 1.43, 1.03, 1.57] # hard limits on the peak widths, [a0, b0, a1, b1, ...] in units of the noise peak. 4 parameters.\n",
+    "chi2_lim = [0,3.0] # Hard limit on chi2/nDOF value\n",
+    "\n",
+    "cell_range = [1,5] # range of cell to be considered, [0,0] for all\n",
+    "pixel_range = [0,0,512,128] # range of pixels x1,y1,x2,y2 to consider [0,0,512,128] for all\n",
+    "max_bins = 250 # Maximum number of bins to consider\n",
+    "batch_size = [1,8,8] # batch size: [cell,x,y]\n",
+    "n_peaks_fit = 4 # Number of gaussian peaks to fit including noise peak\n",
+    "fix_peaks = True # Fix distance between photon peaks\n",
+    "\n",
+    "\n",
+    "# Detector conditions\n",
+    "max_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
+    "bias_voltage = 300 # Bias voltage\n",
+    "acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine\n",
+    "gain_setting = 0.1 # the gain setting, use 0.1 to try to auto-determine\n",
+    "photon_energy = 9.2 # photon energy in keV"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import glob\n",
+    "from multiprocessing import Pool\n",
+    "import re\n",
+    "import os\n",
+    "import traceback\n",
+    "import warnings\n",
+    "\n",
+    "from dateutil import parser\n",
+    "from extra_data import RunDirectory, stack_detector_data\n",
+    "from extra_geom import AGIPD_1MGeometry\n",
+    "import h5py\n",
+    "from iCalibrationDB import Detectors, Conditions, Constants\n",
+    "from iminuit import Minuit\n",
+    "from IPython.display import HTML, display, Markdown, Latex\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import tabulate\n",
+    "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
+    "from XFELDetAna.plotting.simpleplot import simplePlot\n",
+    "\n",
+    "from cal_tools.ana_tools import get_range, save_dict_to_hdf5\n",
+    "from cal_tools.agipdlib import get_acq_rate, get_bias_voltage, get_gain_setting, get_num_cells\n",
+    "from cal_tools.agipdutils_ff import any_in, fit_n_peaks, gaussian_sum, get_starting_parameters\n",
+    "from cal_tools.tools import get_dir_creation_date, send_to_db\n",
+    "from cal_tools.enums import BadPixels, BadPixelsFF\n",
+    "\n",
+    "%matplotlib inline\n",
+    "warnings.filterwarnings('ignore')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Get operation conditions\n",
+    "\n",
+    "filename = glob.glob(f\"{image_data_path}/r{run:04d}/*-AGIPD[0-1][0-9]-*\")[0]\n",
+    "channel = int(re.findall(r\".*-AGIPD([0-9]+)-.*\", filename)[0])\n",
+    "control_fname = f'{image_data_path}/r{run:04d}/RAW-R{run:04d}-{karabo_da_control}-S00000.h5'\n",
+    "\n",
+    "# Evaluate number of memory cells\n",
+    "mem_cells = get_num_cells(filename, karabo_id, channel)\n",
+    "if mem_cells is None:\n",
+    "    raise ValueError(f\"No raw images found in {filename}\")\n",
+    "\n",
+    "# Evaluate aquisition rate\n",
+    "if acq_rate == 0.:\n",
+    "    acq_rate = get_acq_rate((filename, karabo_id, channel))\n",
+    "\n",
+    "# Evaluate creation time\n",
+    "creation_time = None\n",
+    "if use_dir_creation_date:\n",
+    "    creation_time = get_dir_creation_date(image_data_path, run)\n",
+    "    \n",
+    "# Evaluate gain setting\n",
+    "if gain_setting == 0.1:\n",
+    "    if creation_time.replace(tzinfo=None) < parser.parse('2020-01-31'):\n",
+    "        print(\"Set gain-setting to None for runs taken before 2020-01-31\")\n",
+    "        gain_setting = None\n",
+    "    else:\n",
+    "        try:\n",
+    "            gain_setting = get_gain_setting(control_fname, h5path_ctrl)\n",
+    "        except Exception as e:\n",
+    "            print(f'Error while reading gain setting from: \\n{control_fname}')\n",
+    "            print(e)\n",
+    "            print(\"Set gain settion to 0\")\n",
+    "            gain_setting = 0\n",
+    "            \n",
+    "# Evaluate detector instance for mapping\n",
+    "instrument = karabo_id.split(\"_\")[0]\n",
+    "if instrument == \"SPB\":\n",
+    "    dinstance = \"AGIPD1M1\"\n",
+    "else:\n",
+    "    dinstance = \"AGIPD1M2\"\n",
+    "            \n",
+    "print(f\"Using {creation_time} as creation time\")\n",
+    "print(f\"Operating conditions are:\\n• Bias voltage: {bias_voltage}\\n• Memory cells: {mem_cells}\\n\"\n",
+    "              f\"• Acquisition rate: {acq_rate}\\n• Gain setting: {gain_setting}\\n• Photon Energy: {photon_energy}\\n\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load constants for all modules\n",
+    "keys = ['g0mean', 'g1mean', 'gain', 'chi2_ndof', 'mask']\n",
+    "\n",
+    "fit_data = {}\n",
+    "labels = {'g0mean': 'Noise peak position [ADU]',\n",
+    "          'g1mean': 'First photon peak [ADU]',\n",
+    "          'gain': f\"Gain [ADU/keV], $\\gamma$={photon_energy} [keV]\",\n",
+    "          'chi2_ndof': '$\\chi^2$/nDOF',\n",
+    "          'mask': 'Fraction of bad pixels over cells' }\n",
+    "\n",
+    "modules = []\n",
+    "for mod in range(16):\n",
+    "    qm = f\"Q{mod // 4 + 1}M{mod % 4 + 1}\"\n",
+    "    fit_data[mod] = {}\n",
+    "    try:\n",
+    "        hf = h5py.File(f'{out_folder}/fits_m{mod:02d}.h5', 'r')\n",
+    "        shape = hf['data/g0mean'].shape\n",
+    "        for key in keys:\n",
+    "            fit_data[mod][key] = hf[f'data/{key}'][()]\n",
+    "        #print(shape)\n",
+    "        print(f\"{in_folder}/{hist_file_template.format(mod)}\")     \n",
+    "        modules.append(mod)\n",
+    "    except Exception as e:\n",
+    "        err = f\"Error: {e}\\nError traceback: {traceback.format_exc()}\"\n",
+    "        print(f\"No fit data available for module {qm}\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Calculate SlopesFF and BadPixels to be send to DB\n",
+    "bpmask = {}\n",
+    "slopesFF = {}\n",
+    "\n",
+    "for mod in modules:\n",
+    "    bpmask[mod] = np.zeros(fit_data[mod]['mask'].shape).astype(np.int32)\n",
+    "    bpmask[mod][ any_in(fit_data[mod]['mask'], BadPixelsFF.NO_ENTRY.value) ] = BadPixels.FF_NO_ENTRIES.value\n",
+    "    bpmask[mod][ any_in(fit_data[mod]['mask'], \n",
+    "                        BadPixelsFF.GAIN_DEVIATION.value) ] |= BadPixels.FF_GAIN_DEVIATION.value\n",
+    "    bpmask[mod][ any_in(fit_data[mod]['mask'], \n",
+    "                        BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |\n",
+    "                        BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |\n",
+    "                        BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value) ] |= BadPixels.FF_GAIN_EVAL_ERROR.value\n",
+    "    \n",
+    "    # Set value for bad pixel to average across pixels for a given module\n",
+    "    slopesFF[mod] = np.copy(fit_data[mod]['gain'])\n",
+    "    slopesFF[mod][fit_data[mod]['mask']>0] = np.nan\n",
+    "    gain_mean = np.nanmean(slopesFF[mod], axis=(1,2))\n",
+    "    \n",
+    "    for i in range(slopesFF[mod].shape[0]):\n",
+    "        slopesFF[mod][i][ fit_data[mod]['mask'][i] > 0 ] = gain_mean[i]\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Send constants to DB\n",
+    "def send_const(mod):\n",
+    "    try:\n",
+    "        device = getattr(getattr(Detectors, dinstance), f'Q{mod // 4 + 1}M{mod % 4 + 1}')\n",
+    "\n",
+    "        # gain\n",
+    "        constant = Constants.AGIPD.SlopesFF()\n",
+    "        constant.data = np.moveaxis(np.moveaxis(slopesFF[mod],0,2),0,1)\n",
+    "        send_to_db(device, constant, condition, file_loc,\n",
+    "                   cal_db_interface, creation_time,\n",
+    "                   verbosity=1, timeout=cal_db_timeout)\n",
+    "\n",
+    "        # bad pixels        \n",
+    "        constant_bp = Constants.AGIPD.BadPixelsFF()\n",
+    "        constant_bp.data = np.moveaxis(np.moveaxis(bpmask[mod],0,2),0,1)\n",
+    "        send_to_db(device, constant_bp, condition, file_loc,\n",
+    "                   cal_db_interface, creation_time,\n",
+    "                   verbosity=1, timeout=cal_db_timeout)\n",
+    "    \n",
+    "    except Exception as e:\n",
+    "        err = f\"Error: {e}\\nError traceback: {traceback.format_exc()}\"\n",
+    "        when = None\n",
+    "\n",
+    "# set the operating condition\n",
+    "condition = Conditions.Illuminated.AGIPD(mem_cells, bias_voltage, 9.2,\n",
+    "                                         pixels_x=512, pixels_y=128, beam_energy=None,\n",
+    "                                         acquisition_rate=acq_rate, gain_setting=gain_setting)\n",
+    "\n",
+    "# Check, if we have a shape we expect\n",
+    "if db_output:\n",
+    "    if slopesFF[modules[0]].shape == (mem_cells, 512, 128):\n",
+    "        with Pool(processes=len(modules)) as pool:\n",
+    "            const_out = pool.map(send_const, modules)\n",
+    "    else:\n",
+    "        print(f\"Constants are not sent to the DB because of the shape mismatsh\")\n",
+    "        print(f\"Expected {(mem_cells, 512, 128)}, observed {slopesFF[modules[0]].shape}\")\n",
+    "\n",
+    "\n",
+    "condition_dict ={}\n",
+    "    \n",
+    "for entry in condition.to_dict()['parameters']:\n",
+    "    key = entry.pop('parameter_name')\n",
+    "    del entry['description']\n",
+    "    del entry['flg_available']\n",
+    "    condition_dict[key] = entry\n",
+    "\n",
+    "# Create the same file structure as database constants files, in which\n",
+    "# each constant type has its corresponding condition and data.\n",
+    "if local_output:\n",
+    "    for mod in modules:\n",
+    "        qm = f\"Q{mod // 4 + 1}M{mod % 4 + 1}\"    \n",
+    "        device = getattr(getattr(Detectors, dinstance), qm).device_name\n",
+    "        file = f\"{out_folder}/slopesff_bpmask_module_{qm}.h5\"\n",
+    "        dic = {\n",
+    "            device:{\n",
+    "               'SlopesFF': {\n",
+    "                   0:{\n",
+    "                       'condition': condition_dict,\n",
+    "                       'data': np.moveaxis(np.moveaxis(slopesFF[mod],0,2),0,1)}\n",
+    "               },\n",
+    "               'BadPixelsFF':{\n",
+    "                   0:{\n",
+    "                       'condition': condition_dict,\n",
+    "                       'data': np.moveaxis(np.moveaxis(bpmask[mod],0,2),0,1)}\n",
+    "               },\n",
+    "           }\n",
+    "        }        \n",
+    "        save_dict_to_hdf5(dic, file)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Define AGIPD geometry\n",
+    "geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[\n",
+    "    (-525, 625),\n",
+    "    (-550, -10),\n",
+    "    (520, -160),\n",
+    "    (542.5, 475),\n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create the arrays that will be used for figures.\n",
+    "# A dictionary contains all the data for each of the processing stages (gains, mean, slopesFF...).\n",
+    "# Each array correponds to the data for all 16 modules.\n",
+    "# These are updated with their fit/slopes data in the following loops.\n",
+    "if cell_range==[0,0]:\n",
+    "    cell_range[1] = shape[0]\n",
+    "\n",
+    "const_data = {}\n",
+    "for key in keys:\n",
+    "    const_data[key] = np.full((16,shape[0],512,128), np.nan)\n",
+    "    for i in range(16):\n",
+    "        if key in fit_data[i]:\n",
+    "            const_data[key][i,:,pixel_range[0]:pixel_range[2],\n",
+    "                               pixel_range[1]:pixel_range[3]] = fit_data[i][key]\n",
+    "            \n",
+    "const_data['slopesFF'] = np.full((16,shape[0],512,128), np.nan)\n",
+    "labels['slopesFF'] = f'slopesFF [ADU/keV], $\\gamma$={photon_energy} [keV]'\n",
+    "for i in range(16):\n",
+    "    if i in slopesFF:\n",
+    "        const_data['slopesFF'][i,:,pixel_range[0]:pixel_range[2],\n",
+    "                               pixel_range[1]:pixel_range[3]] = slopesFF[i]\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary across pixels ##"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "for key in const_data.keys():\n",
+    "    fig = plt.figure(figsize=(20,20))\n",
+    "    ax = fig.add_subplot(111)\n",
+    "    if key=='mask':\n",
+    "        data = np.nanmean(const_data[key]>0, axis=1)\n",
+    "        vmin, vmax = (0,1)\n",
+    "    else:\n",
+    "        data = np.nanmean(const_data[key], axis=1)\n",
+    "        vmin, vmax = get_range(data, 5)\n",
+    "    ax = geom.plot_data_fast(data, ax=ax, cmap=\"jet\", vmin=vmin, vmax=vmax, figsize=(20,20))\n",
+    "    _ = ax.set_title(labels[key])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary across cells ##\n",
+    "\n",
+    "Good pixels only."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for key in const_data.keys():\n",
+    "    data = np.copy(const_data[key])\n",
+    "    if key=='mask':\n",
+    "        data = data>0\n",
+    "    else:\n",
+    "        data[const_data['mask']>0] = np.nan\n",
+    "        \n",
+    "    d = []\n",
+    "    for i in range(16):\n",
+    "        d.append({'x': np.arange(data[i].shape[0]),\n",
+    "                  'y': np.nanmean(data[i], axis=(1,2)),\n",
+    "                  'drawstyle': 'steps-pre',\n",
+    "                  'label': f'{i}',\n",
+    "                  'linewidth': 2,\n",
+    "                  'linestyle': '--' if i>7 else '-'\n",
+    "                  })\n",
+    "\n",
+    "    fig = plt.figure(figsize=(15, 6))\n",
+    "    ax = fig.add_subplot(111)\n",
+    "\n",
+    "    _ = simplePlot(d, xrange=(-12, 510),\n",
+    "                        x_label='Memory Cell ID',\n",
+    "                        y_label=labels[key],\n",
+    "                        use_axis=ax,\n",
+    "                        legend='top-left-frame-ncol8',)\n",
+    "    ylim = ax.get_ylim()\n",
+    "    ax.set_ylim(ylim[0], ylim[1] + np.abs(ylim[1]-ylim[0])*0.2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Summary table ##"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "table = []\n",
+    "for i in modules:\n",
+    "    table.append((i,\n",
+    "                  f\"{np.nanmean(slopesFF[i]):0.1f} +- {np.nanstd(slopesFF[i]):0.2f}\",\n",
+    "                  f\"{np.nanmean(bpmask[i]>0)*100:0.1f} ({np.nansum(bpmask[i]>0)})\"\n",
+    "                        ))\n",
+    "md = display(Latex(tabulate.tabulate(table, tablefmt='latex',\n",
+    "                                     headers=[\"Module\", \"Gain [ADU/keV]\", \"Bad pixels [%(Count)]\"])))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Performance plots"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_trains_data(run_folder, source, include, tid=None):\n",
+    "    \"\"\"\n",
+    "    Load single train for all module\n",
+    "    \n",
+    "    :param run_folder: Path to folder with data\n",
+    "    :param source: Data source to be loaded\n",
+    "    :param include: Inset of file name to be considered \n",
+    "    :param tid: Train Id to be loaded. First train is considered if None is given\n",
+    "    \n",
+    "    \"\"\"\n",
+    "    run_data = RunDirectory(run_folder, include)\n",
+    "    if tid:\n",
+    "        tid, data = run_data.select('*/DET/*', source).train_from_id(tid)\n",
+    "        return tid, stack_detector_data(data, source)\n",
+    "    else:\n",
+    "        for tid, data in run_data.select('*/DET/*', source).trains(require_all=True):\n",
+    "            return tid, stack_detector_data(data, source)\n",
+    "    return None, None\n",
+    "\n",
+    "\n",
+    "include = '*S00000*'\n",
+    "tid, orig = get_trains_data(f'{image_data_path}/r{run:04d}/', 'image.data', include)\n",
+    "orig = orig[cell_range[0]:cell_range[1], ...]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# FIXME: mask bad pixels from median\n",
+    "# mask = const_data['BadPixelsFF']\n",
+    "\n",
+    "corrections = const_data['slopesFF'] # (16,shape[0],512,128) shape[0]= cell_range[1]-cell_range[0] /\n",
+    "corrections = np.moveaxis(corrections,1,0) # (shape[0],16,512,128)\n",
+    "rel_corr = corrections/np.nanmedian(corrections)\n",
+    "corrected = orig / rel_corr\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Mean value not corrected (train 0)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig = plt.figure(figsize=(20,20))\n",
+    "ax = fig.add_subplot(111)\n",
+    "odata = np.nanmean(orig, axis=0)\n",
+    "vmin, vmax = get_range(odata, 5)\n",
+    "ax = geom.plot_data_fast(odata, ax=ax, cmap=\"jet\", vmin=vmin, vmax=vmax, figsize=(20,20))\n",
+    "_ = ax.set_title(\"Original data, mean across one train\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Mean value corrected (train 0)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig = plt.figure(figsize=(20,20))\n",
+    "ax = fig.add_subplot(111)\n",
+    "cdata = np.nanmean(corrected, axis=0)\n",
+    "vmin, vmax = get_range(cdata, 5)\n",
+    "ax = geom.plot_data_fast(cdata, ax=ax, cmap=\"jet\", vmin=vmin, vmax=vmax, figsize=(20,20))\n",
+    "_ = ax.set_title(\"Corrected data, mean across one train\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Histogram of corrected and uncorrected spectrum (train 0)\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "######################################\n",
+    "#            FIT PEAKS \n",
+    "######################################\n",
+    "\n",
+    "limits = np.reshape(peak_range,(4,2))\n",
+    "x_range = [limits[0][0], limits[-1][-1]]\n",
+    "nb = x_range[1] - x_range[0]+1\n",
+    "\n",
+    "sel = ~np.isnan(corrected)\n",
+    "\n",
+    "fig = plt.figure(figsize=(20, 10))\n",
+    "ax = fig.add_subplot(111)\n",
+    "y,xe, _ = ax.hist(corrected[sel].flatten(), bins=nb, range=x_range, label='corrected', alpha=0.5)\n",
+    "\n",
+    "# get the bin centers from the bin edges \n",
+    "xc=xe[:-1]+(xe[1]-xe[0])/2\n",
+    "\n",
+    "pars, _ = get_starting_parameters(xc, y, limits,4)\n",
+    "minuit = fit_n_peaks(xc, y, pars, x_range,fix_d01=False)\n",
+    "\n",
+    "pc = minuit.args\n",
+    "yfc = multi_gauss(xc,4, *pc)\n",
+    "plt.plot(xc, yfc, label='corrected fit')\n",
+    "\n",
+    "y,_, _ = ax.hist(orig[sel].flatten(), bins=nb, range=x_range, label='original',alpha=0.5)\n",
+    "\n",
+    "minuit = fit_n_peaks(xc, y, pars, x_range,fix_d01=False)\n",
+    "\n",
+    "po = minuit.args\n",
+    "yfo = multi_gauss(xc,4, *po)\n",
+    "plt.plot(xc, yfo, label='original fit')\n",
+    "\n",
+    "plt.title(f\"Signal spectrum, first train\")\n",
+    "plt.xlabel('[ADU]')\n",
+    "plt.legend()\n",
+    "plt.yscale('log')\n",
+    "plt.show()\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "table = []\n",
+    "for i in range(4):\n",
+    "    table.append((f\"Sigma{i} \",\n",
+    "                  f\"{po[2+3*i]:0.2f} \",\n",
+    "                  f\"{pc[2+3*i]:0.2f} \"))\n",
+    "md = display(Latex(tabulate.tabulate(table, tablefmt='latex',\n",
+    "                                     headers=[\"Parameter\", \"Value (original data)\", \"Value (corrected data)\"])))"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb b/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..9f4a3351b56b1857bd6f79465aa94b5249a03bd5
--- /dev/null
+++ b/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
@@ -0,0 +1,1602 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Gain Characterization (Flat Fields) #\n",
+    "\n",
+    "The following code characterizes the gain of the AGIPD detector from flat field data, i.e. data with X-rays of defined intensity. This data should fullfil the following requirements:\n",
+    "\n",
+    "* intensity should be such that single photon peaks are visible\n",
+    "* data for all modules should be present\n",
+    "* no shadowing should occur on any of the modules\n",
+    "* each pixel should have at minimum arround 100 events per photon peak per memory cell\n",
+    "* if central beam edges are visible, they should not be significantly more intense\n",
+    "\n",
+    "Characterization is done by a weighted average algorithm, which evaluates the peak locations for all pixels\n",
+    "and memory cells of a given module. These locations are then fitted to a linear function of the average peak\n",
+    "location in each module, such that it yield a relative gain correction."
+   ]
+  },
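+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A rough sketch of the idea (illustrative only, not the actual pyDetLib implementation; the variable name `peak_pos` is an assumption): if `peak_pos` holds the fitted photon-peak position per pixel and memory cell of one module, the relative gain is essentially the ratio of each position to the module-wide average:\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "\n",
+    "# peak_pos: fitted peak positions, assumed shape (pixels_x, pixels_y, cells)\n",
+    "rel_gain = peak_pos / np.nanmean(peak_pos)  # ~1 for an average pixel/cell\n",
+    "```"
+   ]
+  },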
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T07:34:24.467244Z",
+     "start_time": "2019-09-13T07:34:24.457261Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# the following lines should be adjusted depending on data\n",
+    "in_folder = '/gpfs/exfel/exp/MID/201931/p900091/raw/' # path to input data, required\n",
+    "modules = [3] # modules to work on, required, range allowed\n",
+    "out_folder = \"/gpfs/exfel/exp/MID/201931/p900091/usr/FF/2.2\" # path to output to, required\n",
+    "runs = [484, 485,] # runs to use, required, range allowed\n",
+    "sequences = [0,1,2,3]#,4,5,6,7,8] #,5,6,7,8,9,10] # sequences files to use, range allowed\n",
+    "cluster_profile = \"noDB\" # The ipcluster profile to use\n",
+    "local_output = True # output constants locally\n",
+    "db_output = False # output constants to database\n",
+    "bias_voltage = 300 # detector bias voltage\n",
+    "cal_db_interface = \"tcp://max-exfl016:8026#8035\"  # the database interface to use\n",
+    "mem_cells = 0  # number of memory cells used\n",
+    "interlaced = False # assume interlaced data format, for data prior to Dec. 2017\n",
+    "fit_hook = True # fit a hook function to medium gain slope\n",
+    "rawversion = 2 # RAW file format version\n",
+    "instrument = \"MID\"\n",
+    "photon_energy = 9.2 # the photon energy in keV\n",
+    "offset_store = \"\" # for file-baed access\n",
+    "high_res_badpix_3d = False # set this to True if you need high-resolution 3d bad pixel plots. Runtime: ~ 1h\n",
+    "db_input = True # retreive data from calibration database, setting offset-store will overwrite this\n",
+    "deviation_threshold = 0.75 # peaks with an absolute location deviation larger than the medium are are considere bad\n",
+    "acqrate = 0. # acquisition rate\n",
+    "use_dir_creation_date = True\n",
+    "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00\n",
+    "gain_setting = 0.1 # gain setting can have value 0 or 1, Default=0.1 for no (None) gain-setting\n",
+    "karabo_da_control = \"AGIPD1MCTRL00\" # karabo DA for control infromation\n",
+    "h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP_TEST' # path to control information"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# std library imports\n",
+    "from datetime import datetime\n",
+    "import dateutil\n",
+    "from functools import partial\n",
+    "import warnings\n",
+    "warnings.filterwarnings('ignore')\n",
+    "import os\n",
+    "\n",
+    "import h5py\n",
+    "# numpy and matplot lib specific\n",
+    "import numpy as np\n",
+    "import matplotlib\n",
+    "matplotlib.use(\"Agg\")\n",
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline\n",
+    "\n",
+    "# parallel processing via ipcluster\n",
+    "# make sure a cluster is running with ipcluster start --n=32, give it a while to start\n",
+    "from ipyparallel import Client\n",
+    "\n",
+    "client = Client(profile=cluster_profile)\n",
+    "view = client[:]\n",
+    "view.use_dill()\n",
+    "\n",
+    "# pyDetLib imports\n",
+    "import XFELDetAna.xfelpycaltools as xcal\n",
+    "import XFELDetAna.xfelpyanatools as xana\n",
+    "from XFELDetAna.util import env\n",
+    "env.iprofile = cluster_profile\n",
+    "profile = cluster_profile\n",
+    "\n",
+    "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
+    "from cal_tools.agipdlib import get_num_cells, get_acq_rate, get_gain_setting\n",
+    "from cal_tools.enums import BadPixels\n",
+    "from cal_tools.influx import InfluxLogger\n",
+    "from cal_tools.plotting import show_overview, plot_badpix_3d\n",
+    "from cal_tools.tools import gain_map_files, parse_runs, run_prop_seq_from_path, get_notebook_name, get_dir_creation_date, get_random_db_interface\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T07:34:24.830847Z",
+     "start_time": "2019-09-13T07:34:24.745094Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# usually no need to change these lines\n",
+    "sensor_size = [128, 512]\n",
+    "block_size = [128, 512]\n",
+    "QUADRANTS = 4\n",
+    "MODULES_PER_QUAD = 4\n",
+    "DET_FILE_INSET = \"AGIPD\"\n",
+    "\n",
+    "# Define constant creation time.\n",
+    "if creation_time:\n",
+    "    try:\n",
+    "        creation_time = datetime.strptime(creation_time, '%Y-%m-%d %H:%M:%S.%f')\n",
+    "    except Exception as e:\n",
+    "        print(f\"creation_time value error: {e}.\" \n",
+    "               \"Use same format as YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00/n\")\n",
+    "        creation_time = None\n",
+    "        print(\"Given creation time wont be used.\")\n",
+    "else:\n",
+    "    creation_time = None\n",
+    "\n",
+    "if not creation_time and use_dir_creation_date:\n",
+    "    ntries = 100\n",
+    "    while ntries > 0:\n",
+    "        try:\n",
+    "            creation_time = get_dir_creation_date(in_folder, runs[0])\n",
+    "            break\n",
+    "        except OSError:\n",
+    "            pass\n",
+    "        ntries -= 1\n",
+    "    \n",
+    "print(\"Using {} as creation time\".format(creation_time))\n",
+    "    \n",
+    "runs = parse_runs(runs)\n",
+    "\n",
+    "if offset_store != \"\":\n",
+    "    db_input = False\n",
+    "else:\n",
+    "    db_input = True \n",
+    "\n",
+    "limit_trains = 20\n",
+    "limit_trains_eval = None\n",
+    "\n",
+    "print(\"Parameters are:\")\n",
+    "\n",
+    "print(\"Runs: {}\".format(runs))\n",
+    "print(\"Modules: {}\".format(modules))\n",
+    "print(\"Sequences: {}\".format(sequences))\n",
+    "print(\"Using DB: {}\".format(db_output))\n",
+    "\n",
+    "\n",
+    "if instrument == \"SPB\":\n",
+    "    loc = \"SPB_DET_AGIPD1M-1\"\n",
+    "    dinstance = \"AGIPD1M1\"\n",
+    "    karabo_id_control = \"SPB_IRU_AGIPD1M1\"\n",
+    "else:\n",
+    "    loc = \"MID_DET_AGIPD1M-1\"\n",
+    "    dinstance = \"AGIPD1M2\"\n",
+    "    karabo_id_control = \"MID_EXP_AGIPD1M1\"\n",
+    "\n",
+    "cal_db_interface = get_random_db_interface(cal_db_interface)\n",
+    "\n",
+    "# these lines can usually stay as is\n",
+    "fbase = \"{}/{{}}/RAW-{{}}-AGIPD{{:02d}}-S{{:05d}}.h5\".format(in_folder)\n",
+    "gains = np.arange(3)\n",
+    "\n",
+    "\n",
+    "run, prop, seq = run_prop_seq_from_path(in_folder)\n",
+    "logger = InfluxLogger(detector=\"AGIPD\", instrument=instrument, mem_cells=mem_cells,\n",
+    "                      notebook=get_notebook_name(), proposal=prop)\n",
+    "\n",
+    "# extract the memory cells and acquisition rate from \n",
+    "# the file of the first module, first sequence and first run\n",
+    "channel = 0\n",
+    "fname = fbase.format(runs[0], runs[0].upper(), channel, sequences[0])\n",
+    "if acqrate == 0.:\n",
+    "    acqrate = get_acq_rate(fname, loc, channel)\n",
+    "    \n",
+    "\n",
+    "if mem_cells == 0:\n",
+    "    cells = get_num_cells(fname, loc, channel)\n",
+    "    mem_cells = cells  # avoid setting twice\n",
+    "    \n",
+    "\n",
+    "IL_MODE = interlaced\n",
+    "max_cells = mem_cells if not interlaced else mem_cells*2\n",
+    "memory_cells = mem_cells\n",
+    "print(\"Interlaced mode: {}\".format(IL_MODE))\n",
+    "\n",
+    "cells = np.arange(max_cells)\n",
+    "\n",
+    "print(f\"Acquisition rate and memory cells set from file {fname} are \" \n",
+    "      f\"{acqrate} MHz and {max_cells}, respectively\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "control_fname = f'{in_folder}/{runs[0]}/RAW-{runs[0].upper()}-{karabo_da_control}-S00000.h5'\n",
+    "\n",
+    "if \"{\" in h5path_ctrl:\n",
+    "    h5path_ctrl = h5path_ctrl.format(karabo_id_control)\n",
+    "\n",
+    "if gain_setting == 0.1:\n",
+    "    if creation_time.replace(tzinfo=None) < dateutil.parser.parse('2020-01-31'):\n",
+    "        print(\"Set gain-setting to None for runs taken before 2020-01-31\")\n",
+    "        gain_setting = None\n",
+    "    else:\n",
+    "        try:\n",
+    "            gain_setting = get_gain_setting(control_fname, h5path_ctrl)\n",
+    "        except Exception as e:\n",
+    "            print(f'Error while reading gain setting from: \\n{control_fname}')\n",
+    "            print(e)\n",
+    "            print(\"Gain setting is not found in the control information\")\n",
+    "            print(\"Data will not be processed\")\n",
+    "            sequences = []\n",
+    "print(f\"Gain setting: {gain_setting}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For the characterization offset maps for each module are needed. The are read in either locally or through the XFEL calibration database."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T07:34:45.876476Z",
+     "start_time": "2019-09-13T07:34:25.656979Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from dateutil import parser\n",
+    "offset_g = {}\n",
+    "noise_g = {}\n",
+    "thresholds_g = {}\n",
+    "pc_g = {}\n",
+    "if not db_input:\n",
+    "    print(\"Offset, noise and thresholds have been read in from: {}\".format(offset_store))\n",
+    "    store_file = h5py.File(offset_store, \"r\")\n",
+    "    for i in modules:\n",
+    "        qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "        offset_g[qm] = np.array(store_file[\"{}/Offset/0/data\".format(qm)])\n",
+    "        noise_g[qm] = np.array(store_file[\"{}/Noise/0/data\".format(qm)])\n",
+    "        thresholds_g[qm] = np.array(store_file[\"{}/Threshold/0/data\".format(qm)])\n",
+    "    store_file.close()\n",
+    "else:\n",
+    "    print(\"Offset, noise and thresholds have been read in from calibration database:\")\n",
+    "    first_ex = True\n",
+    "    for i in modules:\n",
+    "        qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "        metadata = ConstantMetaData()\n",
+    "        offset = Constants.AGIPD.Offset()\n",
+    "        metadata.calibration_constant = offset\n",
+    "        det = getattr(Detectors, dinstance)\n",
+    "\n",
+    "        # set the operating condition\n",
+    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
+    "                                          acquisition_rate=acqrate, gain_setting=gain_setting)\n",
+    "        metadata.detector_condition = condition\n",
+    "\n",
+    "        # specify the a version for this constant\n",
+    "        if creation_time is None:\n",
+    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
+    "        else:\n",
+    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
+    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=300000)\n",
+    "        offset_g[qm] = offset.data\n",
+    "        if first_ex:\n",
+    "            print(\"Offset map was injected on: {}\".format(metadata.calibration_constant_version.begin_at))\n",
+    "        \n",
+    "        metadata = ConstantMetaData()\n",
+    "        noise = Constants.AGIPD.Noise()\n",
+    "        metadata.calibration_constant = noise\n",
+    "\n",
+    "        # set the operating condition\n",
+    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
+    "                                          acquisition_rate=acqrate, gain_setting=gain_setting)\n",
+    "        metadata.detector_condition = condition\n",
+    "\n",
+    "        # specify the a version for this constant\n",
+    "        if creation_time is None:\n",
+    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
+    "        else:\n",
+    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
+    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=300000)\n",
+    "        noise_g[qm] = noise.data\n",
+    "        if first_ex:\n",
+    "            print(\"Noise map was injected on: {}\".format(metadata.calibration_constant_version.begin_at))\n",
+    "        \n",
+    "        metadata = ConstantMetaData()\n",
+    "        thresholds = Constants.AGIPD.ThresholdsDark()\n",
+    "        metadata.calibration_constant = thresholds\n",
+    "\n",
+    "        # set the operating condition\n",
+    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
+    "                                          acquisition_rate=acqrate, gain_setting=gain_setting)\n",
+    "        metadata.detector_condition = condition\n",
+    "\n",
+    "        # specify the a version for this constant\n",
+    "        if creation_time is None:\n",
+    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
+    "        else:\n",
+    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
+    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=300000)\n",
+    "        thresholds_g[qm] = thresholds.data\n",
+    "        if first_ex:\n",
+    "            print(\"Threshold map was injected on: {}\".format(metadata.calibration_constant_version.begin_at))\n",
+    "            \n",
+    "            \n",
+    "        metadata = ConstantMetaData()\n",
+    "        pc = Constants.AGIPD.SlopesPC()\n",
+    "        metadata.calibration_constant = pc\n",
+    "\n",
+    "        # set the operating condition\n",
+    "        condition = Conditions.Dark.AGIPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n",
+    "                                          acquisition_rate=acqrate, gain_setting=gain_setting)\n",
+    "        metadata.detector_condition = condition\n",
+    "\n",
+    "        # specify the a version for this constant\n",
+    "        if creation_time is None:\n",
+    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
+    "        else:\n",
+    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
+    "        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=300000)\n",
+    "        pc_g[qm] = np.nanmedian(pc.data[0,...])/pc.data\n",
+    "        if first_ex:\n",
+    "            print(\"PC map was injected on: {}\".format(metadata.calibration_constant_version.begin_at))\n",
+    "            first_ex = False\n",
+    "            "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Initial peak estimates ##\n",
+    "\n",
+    "First initial peak estimates need to be defined. This is done by inspecting histograms created from (a subset of) the offset-corrected data for peak locations and peak heights."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T07:51:37.815384Z",
+     "start_time": "2019-09-13T07:34:45.879121Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "def hist_single_module(fbase, runs, sequences, sensor_size, memory_cells, block_size,\n",
+    "                       il_mode, limit_trains, profile, rawversion, instrument, inp):\n",
+    "    \"\"\" This function calculates a per-pixel histogram for a single module\n",
+    "    \n",
+    "    Runs and sequences give the data to calculate histogram from\n",
+    "    \"\"\"\n",
+    "    channel, offset, thresholds, pc, noise = inp\n",
+    "    \n",
+    "    import XFELDetAna.xfelpycaltools as xcal\n",
+    "    import numpy as np\n",
+    "    import h5py\n",
+    "    from XFELDetAna.util import env\n",
+    "    \n",
+    "    \n",
+    "    env.iprofile = profile\n",
+    "    \n",
+    "    def baseline_correct_via_noise(d, noise, g, baseline_corr_noise_threshold=1000):\n",
+    "        \"\"\" Correct baseline shifts by evaluating position of the noise peak\n",
+    "\n",
+    "        :param: d: the data to correct, should be a single image\n",
+    "        :param noise: the mean noise for the cell id of `d`\n",
+    "        :param g: gain array matching `d` array\n",
+    "\n",
+    "        Correction is done by identifying the left-most (significant) peak\n",
+    "        in the histogram of `d` and shifting it to be centered on 0.\n",
+    "        This is done via a continous wavelet transform (CWT), using a Ricker\n",
+    "        wavelet.\n",
+    "\n",
+    "        Only high gain data is evaulated, and data larger than 50 ADU,\n",
+    "        equivalent of roughly a 9 keV photon is ignored.\n",
+    "\n",
+    "        Two passes are executed: the first shift is accurate to 10 ADU and\n",
+    "        will catch baseline shifts smaller then -2000 ADU. CWT is evaluated\n",
+    "        for peaks of widths one, three and five time the noise.\n",
+    "        The baseline is then shifted by the position of the summed CWT maximum.\n",
+    "\n",
+    "        In a second pass histogram is evaluated within a range of\n",
+    "        +- 5*noise of the initial shift value, for peak of width noise.\n",
+    "\n",
+    "        Baseline shifts larger than the maximum threshold or positive shifts\n",
+    "        larger 10 are discarded, and the original data is returned, otherwise\n",
+    "        the shift corrected data is returned.\n",
+    "\n",
+    "        \"\"\"\n",
+    "        import copy\n",
+    "        from scipy.signal import cwt, ricker\n",
+    "        # we work on a copy to be able to filter values by setting them to\n",
+    "        # np.nan\n",
+    "        dd = copy.copy(d)\n",
+    "        dd[g != 0] = np.nan  # only high gain data\n",
+    "        dd[dd > 50] = np.nan  # only noise data\n",
+    "        h, e = np.histogram(dd, bins=210, range=(-2000, 100))\n",
+    "        c = (e[1:] + e[:-1]) / 2\n",
+    "\n",
+    "        try:\n",
+    "            cwtmatr = cwt(h, ricker, [noise, 3. * noise, 5. * noise])\n",
+    "        except:\n",
+    "            return d\n",
+    "        cwtall = np.sum(cwtmatr, axis=0)\n",
+    "        marg = np.argmax(cwtall)\n",
+    "        pc = c[marg]\n",
+    "\n",
+    "        high_res_range = (dd > pc - 5 * noise) & (dd < pc + 5 * noise)\n",
+    "        dd[~high_res_range] = np.nan\n",
+    "        h, e = np.histogram(dd, bins=200,\n",
+    "                            range=(pc - 5 * noise, pc + 5 * noise))\n",
+    "        c = (e[1:] + e[:-1]) / 2\n",
+    "        try:\n",
+    "            cwtmatr = cwt(h, ricker, [noise, ])\n",
+    "        except:\n",
+    "            return d\n",
+    "        marg = np.argmax(cwtmatr)\n",
+    "        pc = c[marg]\n",
+    "        # too large shift to be easily decernable via noise\n",
+    "        if pc > 10 or pc < -baseline_corr_noise_threshold:\n",
+    "            return d\n",
+    "        return d - pc\n",
+    "\n",
+    "    \n",
+    "    # function needs to be inline for parallell processing\n",
+    "    def read_fun(filename, channel):\n",
+    "        \"\"\" A reader function used by pyDetLib\n",
+    "        \"\"\"\n",
+    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
+    "        if rawversion == 2:\n",
+    "            count = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/count\".format(instrument, channel)])\n",
+    "            first = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/first\".format(instrument, channel)])\n",
+    "            last_index = int(first[count != 0][-1]+count[count != 0][-1])\n",
+    "            first_index = int(first[count != 0][0])\n",
+    "        else:\n",
+    "            status = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(instrument, channel)])\n",
+    "            if np.count_nonzero(status != 0) == 0:\n",
+    "                return\n",
+    "            last = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(instrument, channel)])\n",
+    "            last_index = int(last[status != 0][-1])\n",
+    "            first_index = int(last[status != 0][0])\n",
+    "        if limit_trains is not None:\n",
+    "            last_index = min(limit_trains*memory_cells+first_index, last_index)\n",
+    "        \n",
+    "        im = np.array(infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(instrument, channel)][first_index:last_index,...])    \n",
+    "        carr = infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(instrument, channel)][first_index:last_index]\n",
+    "        cells = np.squeeze(np.array(carr))\n",
+    "        infile.close()\n",
+    "        \n",
+    "        if il_mode:\n",
+    "            ga = im[1::2, 0, ...]\n",
+    "            im = im[0::2, 0, ...].astype(np.float32)\n",
+    "        else:\n",
+    "            ga = im[:, 1, ...]\n",
+    "            im = im[:, 0, ...].astype(np.float32)\n",
+    "\n",
+    "        im = np.rollaxis(im, 2)\n",
+    "        im = np.rollaxis(im, 2, 1)\n",
+    "\n",
+    "        ga = np.rollaxis(ga, 2)\n",
+    "        ga = np.rollaxis(ga, 2, 1)\n",
+    "        return im, ga, cells\n",
+    "    \n",
+    "    offset_cor = xcal.OffsetCorrection(sensor_size,\n",
+    "                                       offset,\n",
+    "                                       nCells=memory_cells,\n",
+    "                                       blockSize=block_size,\n",
+    "                                       gains=[0,1,2])\n",
+    "    offset_cor.mapper = offset_cor._view.map_sync\n",
+    "    offset_cor.debug() # force non-parallel processing since outer function will run concurrently\n",
+    "    hist_calc = xcal.HistogramCalculator(sensor_size,\n",
+    "                                         bins=4000,\n",
+    "                                         range=(-4000, 8000),\n",
+    "                                         blockSize=block_size)\n",
+    "    hist_calc.mapper = hist_calc._view.map_sync\n",
+    "    hist_calc.debug() # force non-parallel processing since outer function will run concurrently\n",
+    "    for run in runs:\n",
+    "        for seq in sequences:\n",
+    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
+    "            d, ga, c = read_fun(fname, channel)\n",
+    "            vidx = (c < offset.shape[2]) & (c < thresholds.shape[2])\n",
+    "            c = c[vidx]\n",
+    "            d = d[:,:,vidx]\n",
+    "            ga = ga[:,:,vidx]\n",
+    "            # we need to do proper gain thresholding\n",
+    "            g = np.zeros(ga.shape, np.uint8)\n",
+    "            g[...] = 2\n",
+    "            \n",
+    "            g[ga < thresholds[...,c,1]] = 1\n",
+    "            g[ga < thresholds[...,c,0]] = 0\n",
+    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
+    "            for i in range(d.shape[2]):\n",
+    "                mn_noise = np.nanmean(noise[...,c[i]])\n",
+    "                d[...,i] = baseline_correct_via_noise(d[...,i],\n",
+    "                                                      mn_noise,\n",
+    "                                                      g[..., i])\n",
+    "            \n",
+    "            d *= np.moveaxis(pc[c,...], 0, 2)\n",
+    "            \n",
+    "            hist_calc.fill(d)\n",
+    "    h, e, c, _ = hist_calc.get()\n",
+    "    return h, e, c\n",
+    "\n",
+    "inp = []\n",
+    "for i in modules:\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    inp.append((i, offset_g[qm], thresholds_g[qm], pc_g[qm][0,...], noise_g[qm][...,0]))\n",
+    "    \n",
+    "p = partial(hist_single_module, fbase, runs, sequences,\n",
+    "            sensor_size, memory_cells, block_size, IL_MODE, limit_trains, profile, rawversion, instrument)\n",
+    "res_uncorr = list(map(p, inp))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T07:51:38.024035Z",
+     "start_time": "2019-09-13T07:51:37.818276Z"
+    },
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "d = []\n",
+    "qms = []\n",
+    "for i, r in enumerate(res_uncorr):\n",
+    "    ii = list(modules)[i]\n",
+    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
+    "    qms.append(qm)\n",
+    "    h, e, c = r \n",
+    "    d.append({\n",
+    "            'x': c, \n",
+    "            'y': h,\n",
+    "            'drawstyle': 'steps-mid'\n",
+    "        })\n",
+    "    \n",
+    "fig = xana.simplePlot(d, y_log=False,\n",
+    "                      figsize=\"2col\",\n",
+    "                      aspect=2,\n",
+    "                      x_range=(-50, 500),\n",
+    "                      x_label=\"Intensity (ADU)\",\n",
+    "                      y_label=\"Counts\")\n",
+    "\n",
+    "fig.savefig(\"{}/FF_module_{}_peak_pos.png\".format(out_folder, \",\".join(qms)))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T07:51:38.029704Z",
+     "start_time": "2019-09-13T07:51:38.026111Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "# these should be quite stable\n",
+    "\n",
+    "peak_estimates = [0, 55, 105, 155]\n",
+    "peak_heights = [5e7, 5e6, 1e6, 5e5]\n",
+    "peak_sigma = [5., 5.,5., 5.,]\n",
+    "\n",
+    "print(\"Using the following peak estimates: {}\".format(peak_estimates))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "## Calculate relative gain per module ##\n",
+    "\n",
+    "Using the so obtained estimates, we calculate the relative gain per module. For this we use the weighted average method implemented in pyDetLib."
+   ]
+  },
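+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The next cell is a minimal, self-contained sketch of the weighted-average idea only (an assumption about the approach, not the pyDetLib implementation): each peak position is normalized to its module-wide mean, and the per-pixel relative gain is the inverse-variance weighted average over peaks. The array names `peak_pos` and `peak_err` are illustrative."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "def relative_gain_sketch(peak_pos, peak_err):\n",
+    "    \"\"\" Sketch: error-weighted relative gain from peak positions.\n",
+    "\n",
+    "    :param peak_pos: hypothetical (x, y, n_peaks) array of peak locations\n",
+    "    :param peak_err: matching array of peak location uncertainties\n",
+    "    \"\"\"\n",
+    "    # normalize each peak to its module-wide mean location\n",
+    "    norm = peak_pos / np.nanmean(peak_pos, axis=(0, 1))\n",
+    "    # inverse-variance weights favour well-determined peaks\n",
+    "    w = 1.0 / peak_err ** 2\n",
+    "    return np.nansum(w * norm, axis=-1) / np.nansum(w, axis=-1)"
+   ]
+  },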
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T08:16:50.652492Z",
+     "start_time": "2019-09-13T07:51:38.031787Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "block_size = [64, 64]\n",
+    "def relgain_single_module(fbase, runs, sequences, peak_estimates,\n",
+    "                          peak_heights, peak_sigma, memory_cells, sensor_size,\n",
+    "                          block_size, il_mode, profile, limit_trains_eval, rawversion, instrument, inp):\n",
+    "    \"\"\" A function for calculated the relative gain of a single AGIPD module\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    # import needed inline for parallel processing\n",
+    "    import XFELDetAna.xfelpycaltools as xcal\n",
+    "    import numpy as np\n",
+    "    import h5py\n",
+    "    from XFELDetAna.util import env\n",
+    "    env.iprofile = profile\n",
+    "    \n",
+    "    channel, offset, thresholds, noise, pc = inp\n",
+    "    \n",
+    "    def baseline_correct_via_noise(d, noise, g, baseline_corr_noise_threshold=1000):\n",
+    "        \"\"\" Correct baseline shifts by evaluating position of the noise peak\n",
+    "\n",
+    "        :param: d: the data to correct, should be a single image\n",
+    "        :param noise: the mean noise for the cell id of `d`\n",
+    "        :param g: gain array matching `d` array\n",
+    "\n",
+    "        Correction is done by identifying the left-most (significant) peak\n",
+    "        in the histogram of `d` and shifting it to be centered on 0.\n",
+    "        This is done via a continous wavelet transform (CWT), using a Ricker\n",
+    "        wavelet.\n",
+    "\n",
+    "        Only high gain data is evaulated, and data larger than 50 ADU,\n",
+    "        equivalent of roughly a 9 keV photon is ignored.\n",
+    "\n",
+    "        Two passes are executed: the first shift is accurate to 10 ADU and\n",
+    "        will catch baseline shifts smaller then -2000 ADU. CWT is evaluated\n",
+    "        for peaks of widths one, three and five time the noise.\n",
+    "        The baseline is then shifted by the position of the summed CWT maximum.\n",
+    "\n",
+    "        In a second pass histogram is evaluated within a range of\n",
+    "        +- 5*noise of the initial shift value, for peak of width noise.\n",
+    "\n",
+    "        Baseline shifts larger than the maximum threshold or positive shifts\n",
+    "        larger 10 are discarded, and the original data is returned, otherwise\n",
+    "        the shift corrected data is returned.\n",
+    "\n",
+    "        \"\"\"\n",
+    "        import copy\n",
+    "        from scipy.signal import cwt, ricker\n",
+    "        # we work on a copy to be able to filter values by setting them to\n",
+    "        # np.nan\n",
+    "        dd = copy.copy(d)\n",
+    "        dd[g != 0] = np.nan  # only high gain data\n",
+    "        dd[dd > 50] = np.nan  # only noise data\n",
+    "        h, e = np.histogram(dd, bins=210, range=(-2000, 100))\n",
+    "        c = (e[1:] + e[:-1]) / 2\n",
+    "\n",
+    "        try:\n",
+    "            cwtmatr = cwt(h, ricker, [noise, 3. * noise, 5. * noise])\n",
+    "        except:\n",
+    "            return d\n",
+    "        cwtall = np.sum(cwtmatr, axis=0)\n",
+    "        marg = np.argmax(cwtall)\n",
+    "        pc = c[marg]\n",
+    "\n",
+    "        high_res_range = (dd > pc - 5 * noise) & (dd < pc + 5 * noise)\n",
+    "        dd[~high_res_range] = np.nan\n",
+    "        h, e = np.histogram(dd, bins=200,\n",
+    "                            range=(pc - 5 * noise, pc + 5 * noise))\n",
+    "        c = (e[1:] + e[:-1]) / 2\n",
+    "        try:\n",
+    "            cwtmatr = cwt(h, ricker, [noise, ])\n",
+    "        except:\n",
+    "            return d\n",
+    "        marg = np.argmax(cwtmatr)\n",
+    "        pc = c[marg]\n",
+    "        # too large shift to be easily decernable via noise\n",
+    "        if pc > 10 or pc < -baseline_corr_noise_threshold:\n",
+    "            return d\n",
+    "        return d - pc\n",
+    "\n",
+    "    \n",
+    "    # function needs to be inline for parallell processing\n",
+    "    def read_fun(filename, channel):\n",
+    "        \"\"\" A reader function used by pyDetLib\n",
+    "        \"\"\"\n",
+    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
+    "        if rawversion == 2:\n",
+    "            count = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/count\".format(instrument, channel)])\n",
+    "            first = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/first\".format(instrument, channel)])\n",
+    "            last_index = int(first[count != 0][-1]+count[count != 0][-1])\n",
+    "            first_index = int(first[count != 0][0])\n",
+    "        else:\n",
+    "            status = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(instrument, channel)])\n",
+    "            if np.count_nonzero(status != 0) == 0:\n",
+    "                return\n",
+    "            last = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(instrument, channel)])\n",
+    "            last_index = int(last[status != 0][-1])\n",
+    "            first_index = int(last[status != 0][0])\n",
+    "        if limit_trains is not None:\n",
+    "            last_index = min(limit_trains*memory_cells+first_index, last_index)\n",
+    "        im = np.array(infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(instrument, channel)][first_index:last_index,...])    \n",
+    "        carr = infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(instrument, channel)][first_index:last_index]\n",
+    "        cells = np.squeeze(np.array(carr))\n",
+    "        infile.close()\n",
+    "\n",
+    "        if il_mode:\n",
+    "            ga = im[1::2, 0, ...]\n",
+    "            im = im[0::2, 0, ...].astype(np.float32)\n",
+    "        else:\n",
+    "            ga = im[:, 1, ...]\n",
+    "            im = im[:, 0, ...].astype(np.float32)\n",
+    "            \n",
+    "        im = np.rollaxis(im, 2)\n",
+    "        im = np.rollaxis(im, 2, 1)\n",
+    "\n",
+    "        ga = np.rollaxis(ga, 2)\n",
+    "        ga = np.rollaxis(ga, 2, 1)\n",
+    "        return im, ga, cells\n",
+    "    \n",
+    "    offset_cor = xcal.OffsetCorrection(sensor_size, offset, nCells=memory_cells,\n",
+    "                                       blockSize=block_size, gains=[0,1,2])\n",
+    "    offset_cor.mapper = offset_cor._view.map_sync\n",
+    "\n",
+    "    rel_gain = xcal.GainMapCalculator(sensor_size,\n",
+    "                                      peak_estimates,\n",
+    "                                      peak_sigma,\n",
+    "                                      nCells=memory_cells,\n",
+    "                                      peakHeights = peak_heights,\n",
+    "                                      noiseMap=noise,\n",
+    "                                      deviationType=\"relative\")\n",
+    "    rel_gain.mapper = rel_gain._view.map_sync\n",
+    "    for run in runs:\n",
+    "        for seq in sequences:\n",
+    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
+    "            d, ga, c = read_fun(fname, channel)\n",
+    "            # we need to do proper gain thresholding\n",
+    "            vidx = (c < offset.shape[2]) & (c < thresholds.shape[2])\n",
+    "            c = c[vidx]\n",
+    "            d = d[:,:,vidx]\n",
+    "            ga = ga[:,:,vidx]\n",
+    "            # we need to do proper gain thresholding\n",
+    "            g = np.zeros(ga.shape, np.uint8)\n",
+    "            g[...] = 2\n",
+    "            \n",
+    "            g[ga < thresholds[...,c,1]] = 1\n",
+    "            g[ga < thresholds[...,c,0]] = 0\n",
+    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
+    "            \n",
+    "            for i in range(d.shape[2]):\n",
+    "                mn_noise = np.nanmean(noise[...,c[i]])\n",
+    "                d[...,i] = baseline_correct_via_noise(d[...,i],\n",
+    "                                                      mn_noise,\n",
+    "                                                      g[..., i])\n",
+    "            \n",
+    "            d *= np.moveaxis(pc[c,...], 0, 2)\n",
+    "            rel_gain.fill(d, cellTable=c)\n",
+    "    \n",
+    "    gain_map = rel_gain.get()\n",
+    "    gain_map_func = rel_gain.getUsingFunc(inverse=False)\n",
+    "    \n",
+    "    pks, stds = rel_gain.getRawPeaks()\n",
+    "    return gain_map, pks, stds, gain_map_func\n",
+    "\n",
+    "inp = []\n",
+    "for i in modules:\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    inp.append((i, offset_g[qm], thresholds_g[qm], noise_g[qm][...,0], pc_g[qm][0,...]))\n",
+    "start = datetime.now()\n",
+    "p = partial(relgain_single_module, fbase, runs, sequences,\n",
+    "            peak_estimates, peak_heights, peak_sigma, memory_cells,\n",
+    "            sensor_size, block_size, IL_MODE, profile, limit_trains_eval, rawversion, instrument)\n",
+    "res_gain = list(map(p, inp)) # don't run concurently as inner function are parelllized\n",
+    "duration = (datetime.now()-start).total_seconds()\n",
+    "logger.runtime_summary_entry(success=True, runtime=duration)\n",
+    "logger.send()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, we inspect the results, by plotting the number of entries, peak locations and resulting gain maps for each peak. In the course of doing so, we identify bad pixels by either having 0 entries for a peak, or having `nan` values for the peak location."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T08:16:54.261782Z",
+     "start_time": "2019-09-13T08:16:50.657594Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "\n",
+    "gain_m = {}\n",
+    "flatsff = {}\n",
+    "gainoff_g = {}\n",
+    "entries_g = {}\n",
+    "peaks_g = {}\n",
+    "mask_g = {}\n",
+    "cell_to_preview = 4\n",
+    "masks_eval_peaks = [1, 2]\n",
+    "global_eval_peaks = [1]\n",
+    "global_meds = {}\n",
+    "\n",
+    "for i, r in enumerate(res_gain):\n",
+    "    ii = list(modules)[i]\n",
+    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
+    "    gain, pks, std, gfunc = r\n",
+    "    gains, errors, chisq, valid, max_dev, stats = gfunc\n",
+    "    _, entries, stds, sow = gain\n",
+    "    gain_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
+    "    gain_mdb = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
+    "    entries_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells, 5))    \n",
+    "    gainoff_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
+    "    mask_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells), np.uint32)\n",
+    "    \n",
+    "    gainoff_g[qm] = gainoff_db\n",
+    "    gain_m[qm] = gain_mdb\n",
+    "    entries_g[qm] = entries_db\n",
+    "    peaks_g[qm] = pks\n",
+    "    \n",
+    "    # create a mask for unregular pixels\n",
+    "    # first bit set if first peak has nan entries\n",
+    "    for pk in masks_eval_peaks:\n",
+    "        mask_db[~(np.isfinite(gain_mdb) & np.isfinite(gainoff_db))] |= BadPixels.FF_GAIN_EVAL_ERROR.value\n",
+    "        mask_db[(np.abs(1-gain_mdb/np.nanmedian(gain_mdb) > deviation_threshold) )] |= BadPixels.FF_GAIN_DEVIATION.value\n",
+    "    # second bit set if entries are 0 for first peak\n",
+    "    mask_db[entries[...,1] == 0] |= BadPixels.FF_NO_ENTRIES.value\n",
+    "    zero_oc = np.count_nonzero((entries > 0).astype(np.int), axis=3)\n",
+    "    mask_db[zero_oc <= len(peak_estimates)-2] |= BadPixels.FF_NO_ENTRIES.value \n",
+    "    # third bit set if entries of a given adc show significant noise\n",
+    "    stds = []\n",
+    "    for ii in range(8):\n",
+    "        for jj in range(8):\n",
+    "            stds.append(np.std(entries_db[ii*16:(ii+1)*16,jj*64+2:(jj+1)*64-2,:,1], axis=(0,1)))\n",
+    "    avg_stds = np.median(np.array(stds), axis=0)\n",
+    "    \n",
+    "    for ii in range(8):\n",
+    "        for jj in range(8):\n",
+    "            std = np.std(entries_db[ii*16:(ii+1)*16,jj*64+2:(jj+1)*64-2,:,1], axis=(0,1))\n",
+    "            if np.any(std > 5*avg_stds):\n",
+    "                mask_db[ii*16:(ii+1)*16,jj*64:(jj+1)*64,std > 5*avg_stds] |= BadPixels.FF_GAIN_DEVIATION\n",
+    "                \n",
+    "    \n",
+    "    mask_g[qm] = mask_db\n",
+    "    \n",
+    "    flat = np.zeros((gains.shape[0], gains.shape[1], memory_cells, 3))\n",
+    "    for j in range(2,len(peak_estimates)):\n",
+    "        flat[...,j-2] = np.mean(entries[...,j]/np.mean(entries[...,j]))\n",
+    "    flat = np.mean(flat, axis=3)\n",
+    "    #flat_db = np.zeros((gains.shape[0], gains.shape[1], memory_cells))\n",
+    "    #for j in range(2):\n",
+    "    #    flat_db[...,j::2] = flat\n",
+    "    flatsff[qm] = flat\n",
+    "    \n",
+    "    global_meds[qm] = np.nanmedian(pks[...,global_eval_peaks][np.max(mask_db, axis=2) != 0])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluated peak locations ##\n",
+    "\n",
+    "The following plot shows the evaluated peak locations for each peak. Peak ids increase downward:\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T08:16:55.562017Z",
+     "start_time": "2019-09-13T08:16:54.264843Z"
+    },
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "from mpl_toolkits.axes_grid1 import AxesGrid\n",
+    "cell_to_preview = 4\n",
+    "for qm, data in peaks_g.items():\n",
+    "    print(\"The module shown is: {}\".format(qm))\n",
+    "    print(\"The cell shown is: {}\".format(cell_to_preview))\n",
+    "    print(\"Evaluated peaks: {}\".format(data.shape[-1]))\n",
+    "    fig = plt.figure(figsize=(15,15))\n",
+    "    grid = AxesGrid(fig, 111,  \n",
+    "                    nrows_ncols=(data.shape[-1], 1),\n",
+    "                    axes_pad=0.1,\n",
+    "                    share_all=True,\n",
+    "                    label_mode=\"L\",\n",
+    "                    cbar_location=\"right\",\n",
+    "                    cbar_mode=\"each\",\n",
+    "                    cbar_size=\"7%\",\n",
+    "                    cbar_pad=\"2%\")\n",
+    "                    \n",
+    "    for j in range(data.shape[-1]):\n",
+    "        d = data[...,cell_to_preview,j]\n",
+    "        d[~np.isfinite(d)] = 0\n",
+    "        \n",
+    "        im = grid[j].imshow(d, interpolation=\"nearest\", vmin=0.9*np.nanmedian(d), vmax=max(1.1*np.nanmedian(d), 50))\n",
+    "        cb = grid.cbar_axes[j].colorbar(im)\n",
+    "        cb.set_label_text(\"Peak location (ADU)\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Gain Slopes And Offsets ##\n",
+    "\n",
+    "The gain slopes and offsets are deduced by fitting a linear function $y = mx + b$ to the evaluated peaks. Gains are normalized to all pixels and all memory cells of a module by using the average peak locations a $x$ values. Thus slopes centered around 1 are to be expected.\n"
+   ]
+  },
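+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a rough sketch of such a fit (an illustration, not the code used above): for a single pixel, the module-averaged peak locations serve as $x$ values and the pixel's own peak locations as $y$ values, and a first-order polynomial fit yields $m$ and $b$."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "def fit_gain_sketch(pixel_peaks, mean_peaks):\n",
+    "    \"\"\" Sketch: per-pixel linear fit y = m*x + b of peak locations.\n",
+    "\n",
+    "    :param pixel_peaks: hypothetical (n_peaks,) peak locations of one pixel\n",
+    "    :param mean_peaks: module-averaged peak locations used as x values\n",
+    "    \"\"\"\n",
+    "    m, b = np.polyfit(mean_peaks, pixel_peaks, 1)\n",
+    "    return m, b  # slope m ~ 1 and offset b ~ 0 are expected\n",
+    "\n",
+    "# example with made-up numbers: a pixel whose gain is 5% above average\n",
+    "print(fit_gain_sketch(np.array([0., 52., 104.]) * 1.05, np.array([0., 52., 104.])))"
+   ]
+  },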
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T08:16:56.879582Z",
+     "start_time": "2019-09-13T08:16:55.564572Z"
+    },
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "for i, r in enumerate(res_gain):\n",
+    "    ii = list(modules)[i]\n",
+    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
+    "    gain, pks, std, gfunc = r\n",
+    "    gains, errors, chisq, valid, max_dev, stats = gfunc    \n",
+    "    _, entries, stds, sow = gain\n",
+    "    \n",
+    "    fig = plt.figure(figsize=(15,8))\n",
+    "    ax = fig.add_subplot(211)\n",
+    "    im = ax.imshow(gains[...,0], interpolation=\"nearest\", vmin=0.85, vmax=1.15)\n",
+    "    cb = fig.colorbar(im)\n",
+    "    cb.set_label(\"Gain slope m\")\n",
+    "    fig.savefig(\"{}/gain_m_mod{}.png\".format(out_folder, qm))\n",
+    "    \n",
+    "    ax = fig.add_subplot(212)\n",
+    "    ax.hist(gains[...,0].flatten(), range=(0.5, 1.5), bins=100)\n",
+    "    ax.set_ylabel(\"Occurences\")\n",
+    "    ax.set_xlabel(\"Gain slope m\")\n",
+    "    ax.semilogy()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The gain offsets b are expected to be centered around 0 and minimal, as data is already offset substracted."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T08:16:58.113910Z",
+     "start_time": "2019-09-13T08:16:56.881383Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "for i, r in enumerate(res_gain):\n",
+    "    ii = list(modules)[i]\n",
+    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
+    "    gain, pks, std, gfunc = r\n",
+    "    gains, errors, chisq, valid, max_dev, stats = gfunc\n",
+    "    _, entries, stds, sow = gain\n",
+    "    \n",
+    "    fig = plt.figure(figsize=(15,8))\n",
+    "    ax = fig.add_subplot(211)\n",
+    "    \n",
+    "    im = ax.imshow(gains[...,1], interpolation=\"nearest\", vmin=-25, vmax=25)\n",
+    "    cb = fig.colorbar(im)\n",
+    "    cb.set_label(\"Gain offset b\")\n",
+    "    fig.savefig(\"{}/gain_b_mod{}.png\".format(out_folder, qm))\n",
+    "    \n",
+    "    ax = fig.add_subplot(212)\n",
+    "    ax.hist(gains[...,1].flatten(), range=(-25, 25), bins=100)\n",
+    "    ax.set_ylabel(\"Occurences\")\n",
+    "    ax.set_xlabel(\"Gain offset b\")\n",
+    "    ax.semilogy()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Bad Pixels ##\n",
+    "\n",
+    "Bad pixels are defined as any of the following criteria: \n",
+    "\n",
+    "* the gain evaluation did not converge, or location of noise peak deviates by more than than the deviation threshold from the median location.\n",
+    "* the number of entries for the noise peak in 0.\n",
+    "* the standard deviation of the number of entries is larger than 5 times the standard deviation for the ASIC the pixel is on."
+   ]
+  },
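+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of how such criteria combine into a bitmask (an illustrative helper assuming boolean criterion arrays, not the exact code below): each criterion sets its own bit, so a pixel can carry several flags at once."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def badpix_mask_sketch(eval_failed, no_entries, deviates):\n",
+    "    \"\"\" Sketch: OR the bad-pixel bits for boolean criterion arrays. \"\"\"\n",
+    "    mask = np.zeros(eval_failed.shape, np.uint32)\n",
+    "    mask[eval_failed] |= BadPixels.FF_GAIN_EVAL_ERROR.value\n",
+    "    mask[no_entries] |= BadPixels.FF_NO_ENTRIES.value\n",
+    "    mask[deviates] |= BadPixels.FF_GAIN_DEVIATION.value\n",
+    "    return mask"
+   ]
+  },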
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T08:18:08.277881Z",
+     "start_time": "2019-09-13T08:18:08.272390Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "print(\"The deviation threshold is: {}\".format(deviation_threshold))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T08:18:10.840520Z",
+     "start_time": "2019-09-13T08:18:09.769451Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "for i, r in enumerate(res_gain):\n",
+    "    ii = list(modules)[i]\n",
+    "    qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
+    "    mask_db = mask_g[qm]\n",
+    "    \n",
+    "    fig = plt.figure(figsize=(15,8))\n",
+    "    ax = fig.add_subplot(211)\n",
+    "    im = ax.imshow(np.log2(mask_db[...,cell_to_preview]), interpolation=\"nearest\", vmin=0, vmax=32)\n",
+    "    cb = fig.colorbar(im)\n",
+    "    cb.set_label(\"Mask value\")\n",
+    "    fig.savefig(\"{}/mask_mod{}.png\".format(out_folder, qm))\n",
+    "    \n",
+    "    ax = fig.add_subplot(212)\n",
+    "    ax.hist(np.log2(mask_db.flatten()), range=(0, 32), bins=32, normed=True)\n",
+    "    ax.set_ylabel(\"Occurences\")\n",
+    "    ax.set_xlabel(\"Mask value\")\n",
+    "    ax.semilogy()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T08:18:14.349244Z",
+     "start_time": "2019-09-13T08:18:11.849053Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "cols = {BadPixels.FF_GAIN_EVAL_ERROR.value: (BadPixels.FF_GAIN_EVAL_ERROR.name, '#FF000080'),\n",
+    "        BadPixels.FF_GAIN_DEVIATION.value: (BadPixels.FF_GAIN_DEVIATION.name, '#0000FF80'),\n",
+    "        BadPixels.FF_NO_ENTRIES.value: (BadPixels.FF_NO_ENTRIES.name, '#00FF0080'),\n",
+    "        BadPixels.FF_GAIN_EVAL_ERROR.value |\n",
+    "        BadPixels.FF_GAIN_DEVIATION.value: ('EVAL+DEV', '#DD00DD80'),\n",
+    "        BadPixels.FF_GAIN_EVAL_ERROR.value |\n",
+    "        BadPixels.FF_NO_ENTRIES.value: ('EVAL+NO_ENTRY', '#00DDDD80'),\n",
+    "        BadPixels.FF_GAIN_DEVIATION.value |\n",
+    "        BadPixels.FF_NO_ENTRIES.value: ('DEV+NO_ENTRY', '#DDDD0080'),\n",
+    "       }\n",
+    "\n",
+    "rebin = 32 if not high_res_badpix_3d else 2\n",
+    "\n",
+    "gain = 0\n",
+    "for mod, data in mask_g.items():\n",
+    "    plot_badpix_3d(data, cols, title=mod, rebin_fac=rebin)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-07-23T18:02:58.599579Z",
+     "start_time": "2019-07-23T18:02:58.316311Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "if local_output:\n",
+    "    ofile = \"{}/agipd_gain_store_{}_modules_{}.h5\".format(out_folder,\n",
+    "                                                          \"_\".join([str(r) for r in runs]),\n",
+    "                                                          \"_\".join([str(m) for m in modules]))\n",
+    "    store_file = h5py.File(ofile, \"w\")\n",
+    "    for i, r in enumerate(res_gain):\n",
+    "        ii = list(modules)[i]\n",
+    "        qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
+    "        gain, pks, std, gfunc = r\n",
+    "        gains, errors, chisq, valid, max_dev, stats = gfunc\n",
+    "        gainmap, entires, stds, sow = gain\n",
+    "        store_file[\"/{}/Gain/0/data\".format(qm)] = gains[...,0]\n",
+    "        store_file[\"/{}/GainOffset/0/data\".format(qm)] = gains[...,1]\n",
+    "        store_file[\"/{}/Flat/0/data\".format(qm)] = flatsff[qm]\n",
+    "        store_file[\"/{}/Entries/0/data\".format(qm)] = entires\n",
+    "        store_file[\"/{}/BadPixels/0/data\".format(qm)] = mask_g[qm]  \n",
+    "    store_file.close()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n",
+    "file_loc = proposal + ' ' + ' '.join(list(map(str,runs)))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-12T14:49:28.355922Z",
+     "start_time": "2019-09-12T14:49:28.340063Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "if db_output:\n",
+    "    for i, r in enumerate(res_gain):\n",
+    "        ii = list(modules)[i]\n",
+    "        qm = \"Q{}M{}\".format(ii//4+1, ii%4+1)\n",
+    "        \n",
+    "        gain, pks, std, gfunc = r\n",
+    "        gains, errors, chisq, valid, max_dev, stats = gfunc\n",
+    "        gainmap, entires, stds, sow = gain\n",
+    "        \n",
+    "        det = getattr(Detectors, dinstance)\n",
+    "        device = getattr(det, qm)\n",
+    "        # gains related\n",
+    "        metadata = ConstantMetaData()\n",
+    "        cgain = Constants.AGIPD.SlopesFF()\n",
+    "        cgain.data = gains\n",
+    "        metadata.calibration_constant = cgain\n",
+    "\n",
+    "        # set the operating condition\n",
+    "        condition = Conditions.Illuminated.AGIPD(memory_cells, bias_voltage, 9.2,\n",
+    "                                                 pixels_x=512, pixels_y=128, beam_energy=None,\n",
+    "                                                 acquisition_rate=acqrate, gain_setting=gain_setting)\n",
+    "        \n",
+    "        metadata.detector_condition = condition\n",
+    "\n",
+    "        # specify the a version for this constant\n",
+    "        if creation_time is None:\n",
+    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
+    "        else:\n",
+    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
+    "\n",
+    "        metadata.calibration_constant_version.raw_data_location = file_loc\n",
+    "        metadata.send(cal_db_interface, timeout=300000)\n",
+    "        \n",
+    "        \n",
+    "        # bad pixels        \n",
+    "        metadata = ConstantMetaData()\n",
+    "        bp = Constants.AGIPD.BadPixelsFF()\n",
+    "        bp.data = mask_g[qm]  \n",
+    "        metadata.calibration_constant = bp\n",
+    "\n",
+    "        # set the operating condition\n",
+    "        condition = Conditions.Illuminated.AGIPD(memory_cells, bias_voltage, 9.2,\n",
+    "                                                 pixels_x=512, pixels_y=128, beam_energy=None,\n",
+    "                                                 acquisition_rate=acqrate, gain_setting=gain_setting)\n",
+    "        \n",
+    "        metadata.detector_condition = condition\n",
+    "\n",
+    "        # specify the a version for this constant\n",
+    "        if creation_time is None:\n",
+    "            metadata.calibration_constant_version = Versions.Now(device=getattr(det, qm))\n",
+    "        else:\n",
+    "            metadata.calibration_constant_version = Versions.Timespan(device=getattr(det, qm), start=creation_time)\n",
+    "        metadata.calibration_constant_version.raw_data_location = file_loc\n",
+    "        metadata.send(cal_db_interface, timeout=300000)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Sanity check ##\n",
+    "\n",
+    "Finally, we perform a correction of the data used to derive the gain constants with said constants. We expected an improvement in peak FWHM, if constants have been deduced correctly."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T09:23:54.999086Z",
+     "start_time": "2019-09-13T09:16:57.293565Z"
+    },
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "def hist_single_module(fbase, runs, sequences, il_mode, profile, limit_trains, memory_cells, rawversion, instrument, inp):\n",
+    "    channel, offset, thresholds, relgain, noise, pc = inp\n",
+    "    gain, pks, std, gfunc = relgain\n",
+    "    gains, errors, chisq, valid, max_dev, stats = gfunc\n",
+    "    \n",
+    "    import XFELDetAna.xfelpycaltools as xcal\n",
+    "    import numpy as np\n",
+    "    import h5py\n",
+    "    import copy\n",
+    "    from XFELDetAna.util import env\n",
+    "    env.iprofile = profile\n",
+    "    sensor_size = [128, 512]\n",
+    "    \n",
+    "    block_size = sensor_size\n",
+    "    \n",
+    "    \n",
+    "    def baseline_correct_via_noise(d, noise, g, baseline_corr_noise_threshold=1000):\n",
+    "        \"\"\" Correct baseline shifts by evaluating position of the noise peak\n",
+    "\n",
+    "        :param: d: the data to correct, should be a single image\n",
+    "        :param noise: the mean noise for the cell id of `d`\n",
+    "        :param g: gain array matching `d` array\n",
+    "\n",
+    "        Correction is done by identifying the left-most (significant) peak\n",
+    "        in the histogram of `d` and shifting it to be centered on 0.\n",
+    "        This is done via a continous wavelet transform (CWT), using a Ricker\n",
+    "        wavelet.\n",
+    "\n",
+    "        Only high gain data is evaulated, and data larger than 50 ADU,\n",
+    "        equivalent of roughly a 9 keV photon is ignored.\n",
+    "\n",
+    "        Two passes are executed: the first shift is accurate to 10 ADU and\n",
+    "        will catch baseline shifts smaller then -2000 ADU. CWT is evaluated\n",
+    "        for peaks of widths one, three and five time the noise.\n",
+    "        The baseline is then shifted by the position of the summed CWT maximum.\n",
+    "\n",
+    "        In a second pass histogram is evaluated within a range of\n",
+    "        +- 5*noise of the initial shift value, for peak of width noise.\n",
+    "\n",
+    "        Baseline shifts larger than the maximum threshold or positive shifts\n",
+    "        larger 10 are discarded, and the original data is returned, otherwise\n",
+    "        the shift corrected data is returned.\n",
+    "\n",
+    "        \"\"\"\n",
+    "        import copy\n",
+    "        from scipy.signal import cwt, ricker\n",
+    "        # we work on a copy to be able to filter values by setting them to\n",
+    "        # np.nan\n",
+    "        dd = copy.copy(d)\n",
+    "        dd[g != 0] = np.nan  # only high gain data\n",
+    "        dd[dd > 50] = np.nan  # only noise data\n",
+    "        h, e = np.histogram(dd, bins=210, range=(-2000, 100))\n",
+    "        c = (e[1:] + e[:-1]) / 2\n",
+    "\n",
+    "        try:\n",
+    "            cwtmatr = cwt(h, ricker, [noise, 3. * noise, 5. * noise])\n",
+    "        except:\n",
+    "            return d\n",
+    "        cwtall = np.sum(cwtmatr, axis=0)\n",
+    "        marg = np.argmax(cwtall)\n",
+    "        pc = c[marg]\n",
+    "\n",
+    "        high_res_range = (dd > pc - 5 * noise) & (dd < pc + 5 * noise)\n",
+    "        dd[~high_res_range] = np.nan\n",
+    "        h, e = np.histogram(dd, bins=200,\n",
+    "                            range=(pc - 5 * noise, pc + 5 * noise))\n",
+    "        c = (e[1:] + e[:-1]) / 2\n",
+    "        try:\n",
+    "            cwtmatr = cwt(h, ricker, [noise, ])\n",
+    "        except:\n",
+    "            return d\n",
+    "        marg = np.argmax(cwtmatr)\n",
+    "        pc = c[marg]\n",
+    "        # too large shift to be easily decernable via noise\n",
+    "        if pc > 10 or pc < -baseline_corr_noise_threshold:\n",
+    "            return d\n",
+    "        return d - pc\n",
+    "    \n",
+    "    def read_fun(filename, channel):\n",
+    "        \n",
+    "        infile = h5py.File(filename, \"r\", driver=\"core\")\n",
+    "        if rawversion == 2:\n",
+    "            count = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/count\".format(instrument, channel)])\n",
+    "            first = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/first\".format(instrument, channel)])\n",
+    "            last_index = int(first[count != 0][-1]+count[count != 0][-1])\n",
+    "            first_index = int(first[count != 0][0])\n",
+    "        else:\n",
+    "            status = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/status\".format(instrument, channel)])\n",
+    "            if np.count_nonzero(status != 0) == 0:\n",
+    "                return\n",
+    "            last = np.squeeze(infile[\"/INDEX/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/last\".format(instrument, channel)])\n",
+    "            last_index = int(last[status != 0][-1])\n",
+    "            first_index = int(last[status != 0][0])\n",
+    "        \n",
+    "        if limit_trains is not None:\n",
+    "            last_index = min(limit_trains*memory_cells+first_index, last_index)\n",
+    "        \n",
+    "        im = np.array(infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data\".format(instrument, channel)][first_index:last_index,...])    \n",
+    "        carr = infile[\"/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(instrument, channel)][first_index:last_index]\n",
+    "        cells = np.squeeze(np.array(carr))\n",
+    "        infile.close()\n",
+    "        \n",
+    "    \n",
+    "        if il_mode:\n",
+    "            ga = im[1::2, 0, ...]\n",
+    "            im = im[0::2, 0, ...].astype(np.float32)\n",
+    "        else:\n",
+    "            ga = im[:, 1, ...]\n",
+    "            im = im[:, 0, ...].astype(np.float32)\n",
+    "\n",
+    "        im = np.rollaxis(im, 2)\n",
+    "        im = np.rollaxis(im, 2, 1)\n",
+    "\n",
+    "        ga = np.rollaxis(ga, 2)\n",
+    "        ga = np.rollaxis(ga, 2, 1)\n",
+    "        return im, ga, cells\n",
+    "    \n",
+    "    offset_cor = xcal.OffsetCorrection(sensor_size, offset, nCells=memory_cells, blockSize=block_size, gains=[0,1,2])\n",
+    "    offset_cor.debug()\n",
+    "    \n",
+    "    hist_calc = xcal.HistogramCalculator(sensor_size, bins=2000, range=(0, 2000), blockSize=block_size)\n",
+    "    hist_calc.debug()\n",
+    "\n",
+    "    hist_calc_uncorr = xcal.HistogramCalculator(sensor_size, bins=2000, range=(0, 2000), blockSize=block_size)\n",
+    "    hist_calc_uncorr.debug()\n",
+    "    \n",
+    "    \n",
+    "    for run in runs:\n",
+    "        for seq in sequences[:2]:\n",
+    "            \n",
+    "            fname = fbase.format(run, run.upper(), channel, seq)\n",
+    "            \n",
+    "            d, ga, c = read_fun(fname, channel)\n",
+    "            \n",
+    "            # we need to do proper gain thresholding\n",
+    "            vidx = (c < offset.shape[2]) & (c < thresholds.shape[2])\n",
+    "            c = c[vidx]\n",
+    "            d = d[:,:,vidx]\n",
+    "            ga = ga[:,:,vidx]\n",
+    "\n",
+    "            g = np.zeros(ga.shape, np.uint8)\n",
+    "            g[...] = 2\n",
+    "            \n",
+    "            g[ga < thresholds[...,c,1]] = 1\n",
+    "            g[ga < thresholds[...,c,0]] = 0\n",
+    "            d = offset_cor.correct(d, cellTable=c, gainMap=g)\n",
+    "            for i in range(d.shape[2]):\n",
+    "                mn_noise = np.nanmean(noise[...,c[i]])\n",
+    "                d[...,i] = baseline_correct_via_noise(d[...,i],\n",
+    "                                                      mn_noise,\n",
+    "                                                      g[..., i])\n",
+    "            \n",
+    "            d *= np.moveaxis(pc[c,...], 0, 2)\n",
+    "            \n",
+    "            hist_calc_uncorr.fill(d)\n",
+    "            d = (d-gains[..., 1][...,None])/gains[..., 0][...,None]\n",
+    "            #d = d/gains[..., 0][...,None]\n",
+    "            hist_calc.fill(d)    \n",
+    "    \n",
+    "    h, e, c, _ = hist_calc.get()\n",
+    "    hu  = hist_calc_uncorr.get()\n",
+    "    return h, e, c, hu[0]\n",
+    "\n",
+    "inp = []\n",
+    "idx = 0\n",
+    "for i in modules:\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    \n",
+    "    inp.append((i, offset_g[qm], thresholds_g[qm], res_gain[idx], noise_g[qm][...,0], pc_g[qm][0,...]))\n",
+    "    idx += 1\n",
+    "    \n",
+    "p = partial(hist_single_module, fbase, runs, sequences, IL_MODE, profile, limit_trains, memory_cells, rawversion, instrument)\n",
+    "#res = view.map_sync(p, inp)\n",
+    "res = list(map(p, inp))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2019-09-13T09:23:56.023378Z",
+     "start_time": "2019-09-13T09:23:55.002226Z"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from iminuit import Minuit\n",
+    "from iminuit.util import make_func_code, describe\n",
+    "from IPython.display import HTML, display\n",
+    "import tabulate\n",
+    "\n",
+    "# fitting\n",
+    "par_ests = {}\n",
+    "par_ests[\"mu0\"] = 0\n",
+    "par_ests[\"mu1\"] = 50\n",
+    "par_ests[\"mu2\"] = 100\n",
+    "par_ests[\"limit_mu0\"] = [-5, 5]\n",
+    "par_ests[\"limit_mu1\"] = [35, 65]\n",
+    "par_ests[\"limit_mu2\"] = [100, 150]\n",
+    "par_ests[\"s0\"] = 10\n",
+    "par_ests[\"s1\"] = 10\n",
+    "par_ests[\"s2\"] = 10\n",
+    "par_ests[\"limit_A0\"] = [1e5, 1e12]\n",
+    "par_ests[\"limit_A1\"] = [1e4, 1e10]\n",
+    "par_ests[\"limit_A2\"] = [1e3, 1e10]\n",
+    "\n",
+    "\n",
+    "par_ests[\"throw_nan\"] = False\n",
+    "par_ests[\"pedantic\"] = False\n",
+    "par_ests[\"print_level\"] = 1\n",
+    "\n",
+    "def gaussian3(x, mu0, s0, A0, mu1, s1, A1, mu2, s2, A2):\n",
+    "    return (A0/np.sqrt(2*np.pi*s0**2)*np.exp(-0.5*((x-mu0)/s0)**2) +\n",
+    "            A1/np.sqrt(2*np.pi*s1**2)*np.exp(-0.5*((x-mu1)/s1)**2) +\n",
+    "            A2/np.sqrt(2*np.pi*s2**2)*np.exp(-0.5*((x-mu2)/s2)**2))\n",
+    "\n",
+    "\n",
+    "f_sig = describe(gaussian3)[1:]\n",
+    "\n",
+    "class _Chi2Functor:\n",
+    "    def __init__(self, f, x, y, err):\n",
+    "        self.f = f\n",
+    "        self.x = x\n",
+    "        self.y = y\n",
+    "        self.err = err\n",
+    "        f_sig = describe(f)\n",
+    "        # this is how you fake function\n",
+    "        # signature dynamically\n",
+    "        self.func_code = make_func_code(\n",
+    "            f_sig[1:])  # docking off independent variable\n",
+    "        self.func_defaults = None  # this keeps numpy.vectorize happy\n",
+    "\n",
+    "    def __call__(self, *arg):\n",
+    "        # notice that it accept variable length\n",
+    "        # positional arguments\n",
+    "        # chi2 = sum((y-self.f(x,*arg))**2 for x,y in zip(self.x,self.y))\n",
+    "        return np.sum(((self.f(self.x, *arg) - self.y) ** 2) / self.err)\n",
+    "    \n",
+    "    \n",
+    "d = []\n",
+    "y_range_max = 0\n",
+    "table = []\n",
+    "headers = ['Module',\n",
+    "           'FWHM (cor.) [ADU]', 'Separation (cor.) [$\\sigma$]',\n",
+    "           'FWHM (uncor.) [ADU]', 'Separation (uncor.) [$\\sigma$]',           \n",
+    "           'Improvement'\n",
+    "          ]\n",
+    "for i, r in enumerate(res):\n",
+    "    qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n",
+    "    row = []\n",
+    "    row.append(qm)\n",
+    "    \n",
+    "    h, e, c, hu = r\n",
+    "        \n",
+    "    \n",
+    "    d.append({\n",
+    "            'x': c, \n",
+    "            'y': h,\n",
+    "            'drawstyle': 'steps-mid',\n",
+    "            'label': '{}: corrected'.format(qm),\n",
+    "            'marker': '.',\n",
+    "            'color': 'blue',\n",
+    "        })\n",
+    "    \n",
+    "    idx = (h > 0) & np.isfinite(h)\n",
+    "    h_non_zero = h[idx]\n",
+    "    c_non_zero = c[idx]\n",
+    "    par_ests[\"A0\"] = np.float(h[np.argmin(abs(c-0))])\n",
+    "    par_ests[\"A1\"] = np.float(h[np.argmin(abs(c-50))])\n",
+    "    par_ests[\"A2\"] = np.float(h[np.argmin(abs(c-100))])\n",
+    "    wrapped = _Chi2Functor(gaussian3, c_non_zero, h_non_zero,\n",
+    "                                     np.sqrt(h_non_zero))\n",
+    "    \n",
+    "    m = Minuit(wrapped, **par_ests)\n",
+    "    fmin = m.migrad()\n",
+    "    \n",
+    "    xt = np.arange(0, 200)\n",
+    "    \n",
+    "    yt = gaussian3(xt, m.values['mu0'],  m.values['s0'],  m.values['A0'],\n",
+    "                   m.values['mu1'],  m.values['s1'],  m.values['A1'],\n",
+    "                   m.values['mu2'],  m.values['s2'],  m.values['A2'])\n",
+    "\n",
+    "    d.append({\n",
+    "            'x': xt, \n",
+    "            'y': yt,\n",
+    "            'label': '{}: corrected (fit)'.format(qm),\n",
+    "            'color': 'green',\n",
+    "            'drawstyle': 'line',\n",
+    "            'linewidth': 2\n",
+    "        })\n",
+    "    \n",
+    "    \n",
+    "    d.append({\n",
+    "            'x': c, \n",
+    "            'y': hu,\n",
+    "            'drawstyle': 'steps-mid',\n",
+    "            'label': '{}: uncorrected'.format(qm),\n",
+    "            'marker': '.',\n",
+    "            'color': 'grey',\n",
+    "            'alpha': 0.5\n",
+    "        })\n",
+    "    \n",
+    "    row += [m.values['s1']*2.35, (m.values['mu1']-m.values['mu0'])/m.values['s1']]\n",
+    "    \n",
+    "    \n",
+    "    idx = (hu > 0) & np.isfinite(hu)\n",
+    "    h_non_zero = hu[idx]\n",
+    "    c_non_zero = c[idx]\n",
+    "    wrapped = _Chi2Functor(gaussian3, c_non_zero, h_non_zero,\n",
+    "                                     np.sqrt(h_non_zero))\n",
+    "    \n",
+    "    m = Minuit(wrapped, **par_ests)\n",
+    "    fmin = m.migrad()\n",
+    "    \n",
+    "    xt = np.arange(0, 200)\n",
+    "    \n",
+    "    yt = gaussian3(xt, m.values['mu0'],  m.values['s0'],  m.values['A0'],\n",
+    "                   m.values['mu1'],  m.values['s1'],  m.values['A1'],\n",
+    "                   m.values['mu2'],  m.values['s2'],  m.values['A2'])\n",
+    "\n",
+    "    d.append({\n",
+    "            'x': xt, \n",
+    "            'y': yt,\n",
+    "            'label': '{}: uncorrected (fit)'.format(qm),\n",
+    "            'color': 'red',\n",
+    "            'linewidth': 2\n",
+    "        })\n",
+    "    \n",
+    "    row += [m.values['s1']*2.35, (m.values['mu1']-m.values['mu0'])/m.values['s1']]\n",
+    "    \n",
+    "    row.append(\"{:0.2f} %\".format(100*(row[3]/row[1]-1)))\n",
+    "    \n",
+    "    y_range_max = max(y_range_max, np.max(h[c>25])*1.5)\n",
+    "    \n",
+    "    # output table\n",
+    "    table.append(row)\n",
+    "    \n",
+    "fig = xana.simplePlot(d, y_log=False, figsize=\"2col\",\n",
+    "                      aspect=2,\n",
+    "                      x_range=(0, 200),\n",
+    "                      legend='top-right-frame',\n",
+    "                      y_range=(0, y_range_max),\n",
+    "                      x_label='Energy (ADU)',\n",
+    "                      y_label='Counts')\n",
+    "\n",
+    "display(HTML(tabulate.tabulate(table, tablefmt='html', headers=headers,\n",
+    "                               numalign=\"right\", floatfmt=\"0.2f\")))"
+   ]
+  },
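+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note on the table above: the FWHM column uses the Gaussian relation $\\mathrm{FWHM} = 2\\sqrt{2\\ln 2}\\,\\sigma \\approx 2.355\\,\\sigma$ (approximated as $2.35\\,\\sigma$ in the code), and the separation is quoted in units of the single-photon peak width $s_1$."
+   ]
+  },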
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/tests/test_agipdutils_ff.py b/tests/test_agipdutils_ff.py
new file mode 100644
index 0000000000000000000000000000000000000000..579c905f662bf7c690874e5b20297e7e9d485db8
--- /dev/null
+++ b/tests/test_agipdutils_ff.py
@@ -0,0 +1,140 @@
+import numpy as np
+from cal_tools.agipdutils_ff import get_mask, set_par_limits
+
+
+def test_get_mask():
+    fit_summary = {
+        'chi2_ndof': 1.674524751845516,
+        'g0n': 6031.641198873036,
+        'error_g0n': 94.63055028459667,
+        'limit_g0n': np.array([0.0, None]),
+        'fix_g0n': False,
+        'g0mean': -13.711814669099589,
+        'error_g0mean': 0.2532017427306297,
+        'limit_g0mean': np.array([-30,  30]),
+        'fix_g0mean': False,
+        'g0sigma': 13.478502058651568,
+        'error_g0sigma': 0.2588135637661919,
+        'limit_g0sigma': np.array([0, 30]),
+        'fix_g0sigma': False,
+        'g1n': 4337.126861254491,
+        'error_g1n': 69.764180118274,
+        'limit_g1n': np.array([0, None]),
+        'fix_g1n': False,
+        'g1mean': 53.90265411499657,
+        'error_g1mean': 0.27585684670864746,
+        'limit_g1mean': None,
+        'fix_g1mean': False,
+        'g1sigma': 15.687448834904817,
+        'error_g1sigma': 0.2951166525483524,
+        'limit_g1sigma': np.array([0, 35]),
+        'fix_g1sigma': False,
+        'g2n': 1542.531700635003,
+        'error_g2n': 43.20145030604567,
+        'limit_g2n': np.array([0, None]),
+        'fix_g2n': False,
+        'g2mean': 120.98535387591575,
+        'error_g2mean': 0.509566354942716,
+        'limit_g2mean': None,
+        'fix_g2mean': False,
+        'g2sigma': 15.550452880533143,
+        'error_g2sigma': 0.5003254358001863,
+        'limit_g2sigma': np.array([0, 40]),
+        'fix_g2sigma': False,
+        'g3n': 1261189.2282171287,
+        'error_g3n': 1261190.2282163086,
+        'limit_g3n': np.array([0, None]),
+        'fix_g3n': False,
+        'g3mean': 348.68766379647343,
+        'error_g3mean': 17.23872285713375,
+        'limit_g3mean': None,
+        'fix_g3mean': False,
+        'g3sigma': 44.83987230934497,
+        'error_g3sigma': 30.956164693249242,
+        'limit_g3sigma': np.array([0, 45]),
+        'fix_g3sigma': False,
+        'fval': 336.5794751209487,
+        'edm': 0.00011660826330754263,
+        'tolerance': 0.1,
+        'nfcn': 4620,
+        'ncalls': 4620,
+        'up': 1.0,
+        'is_valid': True,
+        'has_valid_parameters': True,
+        'has_accurate_covar': True,
+        'has_posdef_covar': True,
+        'has_made_posdef_covar': False,
+        'hesse_failed': False,
+        'has_covariance': True,
+        'is_above_max_edm': False,
+        'has_reached_call_limit': False}
+    peak_lim = [-30, 30]
+    d0_lim = [10, 80]
+    chi2_lim = [0, 3.0]
+    peak_width_lim = np.array([[0.9, 1.55], [0.95, 1.65]])
+    mask = get_mask(fit_summary, peak_lim, d0_lim, chi2_lim, peak_width_lim)
+    assert mask == 0
+
+
+def test_set_par_limits():
+    peak_range = np.array([[-30, 30],
+                           [35, 70],
+                           [95, 135],
+                           [145,  220]])
+
+    peak_norm_range = np.array([[0.0, None],
+                                [0, None],
+                                [0, None],
+                                [0, None]])
+    peak_width_range = np.array([[0, 30],
+                                 [0, 35],
+                                 [0, 40],
+                                 [0, 45]])
+
+    parameters = {
+        'g0sigma': 9.620186459204016,
+        'g0n': 5659.0,
+        'g0mean': -3.224686340342817,
+        'g1sigma': 8.149415371586683,
+        'g1n': 3612.0,
+        'g1mean': 54.6281838316722,
+        'g2sigma': 9.830124777667839,
+        'g2n': 1442.0,
+        'g2mean': 114.92510402219139,
+        'g3sigma': 15.336595220228498,
+        'g3n': 474.0,
+        'g3mean': 167.0295358649789}
+
+    expected = {
+        'g0sigma': 9.620186459204016,
+        'g0n': 5659.0,
+        'g0mean': -3.224686340342817,
+        'g1sigma': 8.149415371586683,
+        'g1n': 3612.0,
+        'g1mean': 54.6281838316722,
+        'g2sigma': 9.830124777667839,
+        'g2n': 1442.0,
+        'g2mean': 114.92510402219139,
+        'g3sigma': 15.336595220228498,
+        'g3n': 474.0,
+        'g3mean': 167.0295358649789,
+        'limit_g0n': np.array([0.0, None]),
+        'limit_g0mean': np.array([-30,  30]),
+        'limit_g0sigma': np.array([0, 30]),
+        'limit_g1n': np.array([0, None]),
+        'limit_g1mean': np.array([35, 70]),
+        'limit_g1sigma': np.array([0, 35]),
+        'limit_g2n': np.array([0, None]),
+        'limit_g2mean': np.array([95, 135]),
+        'limit_g2sigma': np.array([0, 40]),
+        'limit_g3n': np.array([0, None]),
+        'limit_g3mean': np.array([145, 220]),
+        'limit_g3sigma': np.array([0, 45])}
+
+    set_par_limits(parameters, peak_range, peak_norm_range, peak_width_range)
+    assert parameters.keys() == expected.keys()
+    for key in parameters.keys():
+        if isinstance(parameters[key], np.ndarray):
+            assert np.all(parameters[key] == expected[key])
+        else:
+            assert parameters[key] == expected[key]
diff --git a/xfel_calibrate/notebooks.py b/xfel_calibrate/notebooks.py
index 7c73714ea8f52a6bdf061ae1edfe8417e1a7eaf7..5019736610c389a37ec238a1801cb83c62bb3a9b 100644
--- a/xfel_calibrate/notebooks.py
+++ b/xfel_calibrate/notebooks.py
@@ -21,6 +21,8 @@ notebooks = {
         "FF": {
             "notebook":
                 "notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb",
+            "dep_notebooks": [
+                "notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb"],
             "concurrency": {"parameter": "modules",
                             "default concurrency": 16,
                             "cluster cores": 16},