diff --git a/cal_tools/cal_tools/agipdlib.py b/cal_tools/cal_tools/agipdlib.py
index 5d0263b11bc5fea27327891d607b9cdf600fc3de..07147199ad651208053b42713a7025b40c887aa5 100644
--- a/cal_tools/cal_tools/agipdlib.py
+++ b/cal_tools/cal_tools/agipdlib.py
@@ -3,22 +3,21 @@ from enum import Enum
 
 import h5py
 import numpy as np
+from cal_tools.enums import BadPixels
+from cal_tools.tools import get_constant_from_db, get_constant_from_db_and_time
+from iCalibrationDB import Constants, Conditions, Detectors
 from scipy.signal import cwt, ricker
 from sklearn.mixture import GaussianMixture
 from sklearn.preprocessing import StandardScaler
 
-from cal_tools.enums import BadPixels
-from cal_tools.tools import get_constant_from_db
-from iCalibrationDB import Constants, Conditions, Detectors
-
 
 def get_num_cells(fname, loc, module):
     with h5py.File(fname, "r") as f:
-        
-        cells = f["INSTRUMENT/{}/DET/{}CH0:xtdf/image/cellId".format(loc, module)][:352]
+        cells = f["INSTRUMENT/{}/DET/{}CH0:xtdf/image/cellId".format(
+            loc, module)][()]
         maxcell = np.max(cells)
         options = [4, 32, 64, 76, 128, 176]
-        dists = [abs(o-maxcell) for o in options]
+        dists = [abs(o - maxcell) for o in options]
         return options[np.argmin(dists)]
 
 
@@ -65,7 +64,8 @@ class AgipdCorrections:
                  h5_index_path="INDEX/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/",
                  do_rel_gain=True, chunk_size_idim=512, il_mode=False,
                  cal_det_instance="AGIPD1M1", karabo_data_mode=False,
-                 force_hg_if_below=None, force_mg_if_below=None, mask_noisy_adc=False):
+                 force_hg_if_below=None, force_mg_if_below=None,
+                 mask_noisy_adc=False, adjust_mg_baseline=False):
         """
         Initialize an AgipdCorrections Class
 
@@ -134,16 +134,18 @@ class AgipdCorrections:
         self.mask_noisy_adc = mask_noisy_adc
         self.adc_mask = None
         self.gain_stats = [0, 0, 0]
+        self.adjust_mg_baseline = adjust_mg_baseline
+        self.mg_bl_adjust = 0
 
     def get_iteration_range(self):
         """Returns a range expression over which to iterate in chunks
         """
         splitlength = self.firange.size // self.chunksize
         splits = np.arange(0, self.firange.size, self.chunksize)
-        return [ a for a in np.array_split(self.firange, splits) if a.size > 0]
+        return [a for a in np.array_split(self.firange, splits) if a.size > 0]
 
     def initialize(self, offset, rel_gain, mask, noise, thresholds,
-                   base_offset, swap_axis=True):
+                   base_offset, slopesPC=None, swap_axis=True):
         """ Initialize the calibration constants and the output data file.
 
         Any data that is not touched by the corrections is copied into the
@@ -183,8 +185,13 @@ class AgipdCorrections:
                 mvd = np.moveaxis(np.moveaxis(noise, 2, 0), 2, 1)
                 noise = np.ascontiguousarray(mvd)
             if base_offset is not None:
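+                # gain stages whose base offset was not retrieved as an
+                # array fall back to the median of the available ones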
+                fallback_bo = np.nanmedian(np.array(
+                    [b for b in base_offset if isinstance(b, np.ndarray)]),
+                                           axis=0)
                 for i in range(len(base_offset)):
                     if base_offset[i] is not None:
+                        if not isinstance(base_offset[i], np.ndarray):
+                            base_offset[i] = fallback_bo
                         mvd = np.moveaxis(base_offset[i], 2, 0)
                         mvd = np.moveaxis(mvd, 2, 1)
                         base_offset[i] = np.ascontiguousarray(mvd)
@@ -221,7 +228,7 @@ class AgipdCorrections:
             for i in range(8):
                 for j in range(2):
                     asico = self.offset[:, i * 64:(i + 1) * 64,
-                                        j * 64:(j + 1) * 64, 0]
+                                        j * 64:(j + 1) * 64, 0]
                     asic_m = np.nanmedian(asico, axis=(1, 2))
                     global_m = np.nanmedian(self.offset[:, ..., 0],
                                             axis=(1, 2))
@@ -241,6 +248,27 @@ class AgipdCorrections:
 
             self.offset[..., 1] += doff
 
+        if self.adjust_mg_baseline and slopesPC is not None:
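+            # derive a medium-gain baseline shift from the pulse capacitor
+            # (PC) fits: find the signal at which the medium-gain line
+            # m_m * x + b_m crosses the medium-gain dark offset, snap it to
+            # the nearest point of a 0..1000 grid, and take the difference
+            # of the high- and medium-gain PC lines at that point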
+            x = np.linspace(0, 1000, 1000)
+            m_h = np.moveaxis(
+                np.moveaxis(slopesPC[..., :self.max_cells, 0], 0, 2), 0, 1)
+            b_h = np.moveaxis(
+                np.moveaxis(slopesPC[..., :self.max_cells, 1], 0, 2), 0, 1)
+            m_m = np.moveaxis(
+                np.moveaxis(slopesPC[..., :self.max_cells, 3], 0, 2), 0, 1)
+            b_m = np.moveaxis(
+                np.moveaxis(slopesPC[..., :self.max_cells, 4], 0, 2), 0, 1)
+            b_mo = self.offset[..., 1]
+            mz = -(b_m - b_mo) / m_m
+            cx = np.zeros(mz.shape)
+            for i in range(cx.shape[0]):
+                for j in range(cx.shape[1]):
+                    for k in range(cx.shape[2]):
+                        cx[i, j, k] = x[np.argmin(np.abs(mz[i, j, k] - x))]
+            doz = (m_h * cx + b_h - m_m * cx - b_m)
+            self.mg_bl_adjust = doz
+
         # this is the first call, usually from the database
         # we also generate file structures here, as the second call is optional
         if not self.initialized:
@@ -272,7 +300,7 @@ class AgipdCorrections:
         """ Split gain information off AGIPD Data
         """
         return d[:, 0, ...], d[:, 1, ...]
-    
+
     def split_gain_il(self, d):
         """ Split gain information off AGIPD IL Data
         """
@@ -534,9 +562,9 @@ class AgipdCorrections:
             cme, cms = hist_ends(casic[idxm])
 
             asic = d[(i + 1) * 64 - rsize:(i + 1) * 64 - 1,
-                     64 - rsize:64].flatten()
+                     64 - rsize:64].flatten()
             asic_g = g[(i + 1) * 64 - rsize:(i + 1) * 64 - 1,
-                       64 - rsize:64].flatten()
+                       64 - rsize:64].flatten()
 
             idxm = asic_g == 1
             me, ms = hist_ends(asic[idxm])
@@ -648,9 +676,9 @@ class AgipdCorrections:
                 for j in range(2):
 
                     fidx = gain[k, i * 64:(i + 1) * 64,
-                                j * 64:(j + 1) * 64] == 1
+                                j * 64:(j + 1) * 64] == 1
                     fidx_h = gain[k, i * 64:(i + 1) * 64,
-                                  j * 64:(j + 1) * 64] == 0
+                                  j * 64:(j + 1) * 64] == 0
 
                     # need at least two pixels in medium gain to be able to
                     # cluster in two groups
@@ -705,7 +733,7 @@ class AgipdCorrections:
 
                     asic = im[k, i * 64:(i + 1) * 64, j * 64:(j + 1) * 64]
                     asic_msk = snow_mask[k, i * 64:(i + 1) * 64,
-                                         j * 64:(j + 1) * 64]
+                                         j * 64:(j + 1) * 64]
                     mim = asic[fidx]
                     asicb = bg_h
                     mimb = asicb[fidx]
@@ -747,24 +775,29 @@ class AgipdCorrections:
                     im[k, i * 64:(i + 1) * 64, j * 64:(j + 1) * 64] = asic
                     gain[k, i * 64:(i + 1) * 64, j * 64:(j + 1) * 64] = asic_g
                     snow_mask[k, i * 64:(i + 1) * 64,
-                              j * 64:(j + 1) * 64] = asic_msk
+                              j * 64:(j + 1) * 64] = asic_msk
         return im, gain, snow_mask
-    
+
     def make_noisy_adc_mask(self, bmask):
         """ Mask entire ADC if they are noise above a relative threshold
-        """        
-        msk = np.count_nonzero(((bmask & BadPixels.NOISE_OUT_OF_THRESHOLD.value != 0)
-                               | (bmask & BadPixels.OFFSET_NOISE_EVAL_ERROR.value != 0)).astype(np.int) , axis=0)
+        """
+        msk = np.count_nonzero(
+            ((bmask & BadPixels.NOISE_OUT_OF_THRESHOLD.value != 0)
+             | (bmask & BadPixels.OFFSET_NOISE_EVAL_ERROR.value != 0)).astype(
+                np.int32), axis=0)
 
         fmask = np.zeros_like(msk).astype(np.uint32)
-        adc_i = bmask.shape[1]//8
-        adc_j = bmask.shape[2]//8
+        adc_i = bmask.shape[1] // 8
+        adc_j = bmask.shape[2] // 8
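+        # per ADC block: count pixels that are noisy in more than 1/32 of
+        # the memory cells and mask the whole block if their fraction
+        # exceeds the configured threshold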
         for i in range(adc_i):
             for j in range(adc_j):
-                adc_cnt = np.count_nonzero(msk[i*adc_i:(i+1)*adc_i, j*adc_j:(j+1)*adc_j] > bmask.shape[0]//32)
-                if adc_cnt / (adc_i*adc_j) >= self.mask_noisy_adc:
-                    fmask[i*adc_i:(i+1)*adc_i, j*adc_j:(j+1)*adc_j] = BadPixels.NOISY_ADC.value
-        return fmask          
+                adc_cnt = np.count_nonzero(
+                    msk[i * adc_i:(i + 1) * adc_i, j * adc_j:(j + 1) * adc_j] >
+                    bmask.shape[0] // 32)
+                if adc_cnt / (adc_i * adc_j) >= self.mask_noisy_adc:
+                    fmask[i * adc_i:(i + 1) * adc_i,
+                          j * adc_j:(j + 1) * adc_j] = \
+                        BadPixels.NOISY_ADC.value
+        return fmask
 
     def correct_agipd(self, irange):
         """ Correct Raw AGIPD data for offset and relative gain effects
@@ -788,10 +821,12 @@ class AgipdCorrections:
             status = np.squeeze(
                 self.infile[agipd_base + "/image/status"][irange, ...])
             cellid = np.squeeze(
-                np.array(self.infile[agipd_base + "/image/cellId"][irange, ...]))
+                np.array(
+                    self.infile[agipd_base + "/image/cellId"][irange, ...]))
             length = np.squeeze(
-                np.array(self.infile[agipd_base + "/image/length"][irange, ...]))
-            
+                np.array(
+                    self.infile[agipd_base + "/image/length"][irange, ...]))
+
             # this is the far end of the image index range we are working on
             nidx = int(cidx + irange.size)
         else:
@@ -819,7 +854,7 @@ class AgipdCorrections:
             status = status[0::2]
             cellid = cellid[0::2]
             length = length[0::2]
-        
+
         # first evaluate the gain into 0, 1, 2 --> high, medium, low
         gain = np.zeros(ga.shape, np.uint8)
         gain[...] = 0
@@ -828,18 +863,20 @@ class AgipdCorrections:
         # exceeding first threshold means data is medium or low gain
         gain[ga > t0[cellid, ...]] = 1
         # exceeding also second threshold means data is low gain
-        gain[ga > t1[cellid, ...]] = 2
-        
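+        # additionally require the second threshold to be at least 5% above
+        # the first, otherwise the low-gain assignment is not trusted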
+        gain[(ga > t1[cellid, ...])
+             & (t1[cellid, ...] > 1.05 * t0[cellid, ...])] = 2
+
         # force into high or medium gain if requested        
         if self.force_mg_if_below is not None and self.force_mg_if_below > 0:
-            gain[(gain == 2) & ((im - self.offset[cellid, ...,1]) < self.force_mg_if_below)] = 1
-        
+            gain[(gain == 2) & ((im - self.offset[cellid, ..., 1])
+                                < self.force_mg_if_below)] = 1
+
         if self.force_hg_if_below is not None and self.force_hg_if_below > 0:
-            gain[(im - self.offset[cellid, ...,0]) < self.force_hg_if_below] = 0
-                    
+            gain[(gain > 0) & ((im - self.offset[cellid, ..., 0])
+                               < self.force_hg_if_below)] = 0
+
         for i in range(3):
             self.gain_stats[i] = np.sum(gain == i)
-        
 
         # check if any data has zero standard deviation, and mark this in
         # the bad pixel mask
@@ -930,6 +967,10 @@ class AgipdCorrections:
         if self.do_rel_gain:
             im *= rel_cor
 
+        if self.adjust_mg_baseline:
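+            # shift medium-gain pixels by the baseline adjustment derived
+            # in initialize(); the factor of 1.5 appears to be an empirical
+            # scaling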
+            mgbc = self.mg_bl_adjust[cellid, ...]
+            im[gain == 1] += 1.5 * mgbc[gain == 1]
+
         # try to identify snowy pixels at this point
         if self.melt_snow is not False:
             ms = self.melt_snow
@@ -987,14 +1028,14 @@ class AgipdCorrections:
             if self.adc_mask is None:
                 self.adc_mask = self.make_noisy_adc_mask(msk)
             msk |= self.adc_mask
-            
+
         # now write out the data
         sd = 1 if not self.il_mode else 2
-        
+
         if not self.karabo_data_mode:
-            self.ddset[cidx//sd:nidx//sd, ...] = im
-            self.gdset[cidx//sd:nidx//sd, ...] = gain
-            self.mdset[cidx//sd:nidx//sd, ...] = msk
+            self.ddset[cidx // sd:nidx // sd, ...] = im
+            self.gdset[cidx // sd:nidx // sd, ...] = gain
+            self.mdset[cidx // sd:nidx // sd, ...] = msk
 
             self.outfile[agipd_base + "image/cellId"][cidx:nidx] = cellid
             self.outfile[agipd_base + "image/trainId"][cidx:nidx] = trainId
@@ -1043,7 +1084,7 @@ class AgipdCorrections:
             valid = status != 0
             last_index = int(last[status != 0][-1]) + 1
             first_index = int(first[status != 0][0])
-            
+
             idxtrains = np.squeeze(self.infile["/INDEX/trainId"])
             medianTrain = np.nanmedian(idxtrains)
             lowok = (idxtrains > medianTrain - 1e4)
@@ -1068,7 +1109,7 @@ class AgipdCorrections:
         agipd_base = self.agipd_base
         allcells = self.infile[agipd_base + "image/cellId"]
         allcells = np.squeeze(allcells[first_index:last_index, ...])
-        
+
         single_image = self.infile[agipd_base + "image/data"][first_index, ...]
         single_image = np.array(single_image)
 
@@ -1078,12 +1119,13 @@ class AgipdCorrections:
         allcells = allcells[can_calibrate]
         firange = np.arange(first_index, last_index)
         firange = firange[can_calibrate]
-        
-        self.oshape = (firange.size if not self.il_mode else firange.size//2,
+
+        self.oshape = (firange.size if not self.il_mode else firange.size // 2,
                        single_image.shape[1],
                        single_image.shape[2])
         self.firange = firange
         self.single_image = single_image
+        self.can_calibrate = can_calibrate
 
     def copy_and_sanitize_non_cal_data(self):
         """ Copy and sanitize data in `infile` that is not touched by
@@ -1099,7 +1141,8 @@ class AgipdCorrections:
         if self.il_mode:
             firange = firange[0::2]
         alltrains = self.infile[agipd_base + "image/trainId"]
-        alltrains = np.squeeze(alltrains[first_index:last_index, ...])
+        alltrains = np.squeeze(
+            alltrains[first_index:last_index, ...])  # [self.can_calibrate]
 
         # these are touched in the correct function, do not copy them here
         dont_copy = ["data", "cellId", "trainId", "pulseId", "status",
@@ -1132,15 +1175,33 @@ class AgipdCorrections:
 
         # sanitize indices
         for do in ["image", ]:
-            print(alltrains.shape, firange.max())
-            uq, fidxv, cntsv = np.unique(alltrains[firange-firange[0]], return_index=True,
-                                       return_counts=True)
-            fidx = np.zeros(self.valid.shape, fidxv.dtype)
-            fidx[self.valid] = fidxv
-            
-            cnts = np.zeros(self.valid.shape, cntsv.dtype)
-            cnts[self.valid] = cntsv
-            
+
+            uq, fidxv, cntsv = np.unique(alltrains[firange - firange[0]],
+                                         return_index=True,
+                                         return_counts=True)
+
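+            # trains can be missing from the data: pad the first/count
+            # arrays with zero entries for each missing train ID, treating
+            # jumps of more than 1000 trains as a single gap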
+            duq = (uq[1:] - uq[:-1]).astype(np.int64)
+
+            cfidxv = [fidxv[0], ]
+            ccntsv = [cntsv[0], ]
+            for i, du in enumerate(duq.tolist()):
+                if du > 1000:
+                    du = 1
+                    cntsv[i] = 0
+                cfidxv += [0] * (du - 1) + [fidxv[i + 1], ]
+                ccntsv += [0] * (du - 1) + [cntsv[i + 1], ]
+
+            mv = len(cfidxv)
+            fidx = np.zeros(len(cfidxv), fidxv.dtype)
+            fidx[self.valid[:mv]] = np.array(cfidxv)[self.valid[:mv]]
+
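+            # back-fill zeroed first-index entries with the next valid
+            # index so that the index stays monotonic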
+            for i in range(len(fidx) - 1, 2, -1):
+                if fidx[i - 1] == 0 and fidx[i] != 0:
+                    fidx[i - 1] = fidx[i]
+
+            cnts = np.zeros(len(cfidxv), cntsv.dtype)
+            cnts[self.valid[:mv]] = np.array(ccntsv)[self.valid[:mv]]
+
             self.outfile.create_dataset(idx_base + "{}/first".format(do),
                                         fidx.shape,
                                         dtype=fidx.dtype,
@@ -1181,8 +1242,8 @@ class AgipdCorrections:
 
         fsz = list(firange.shape)
         if self.il_mode:
-            fsz[0] = fsz[0]//2
-        
+            fsz[0] = fsz[0] // 2
+
         self.outfile.create_dataset(agipd_base + "image/cellId", fsz,
                                     dtype=np.uint16, fletcher32=True)
         self.outfile.create_dataset(agipd_base + "image/trainId", fsz,
@@ -1311,7 +1372,7 @@ class AgipdCorrections:
             (cal_db_interface, creation_time, max_cells_db,
              bias_voltage, photon_energy) = dbparms
             max_cells_db_dark = max_cells_db
-            timeout_cal_db = 300000
+            timeout_cal_db = 30000
 
         elif len(dbparms) == 6:
             (cal_db_interface, creation_time, max_cells_db,
@@ -1319,7 +1380,7 @@ class AgipdCorrections:
 
             if max_cells_db_dark is None:
                 max_cells_db_dark = max_cells_db
-            timeout_cal_db = 300000
+            timeout_cal_db = 30000
         else:
             (cal_db_interface, creation_time, max_cells_db,
              bias_voltage, photon_energy, max_cells_db_dark,
@@ -1328,22 +1389,24 @@ class AgipdCorrections:
             if max_cells_db_dark is None:
                 max_cells_db_dark = max_cells_db
 
-        if "#" in cal_db_interface:
-            prot, serv, ran = cal_db_interface.split(":")
-            r1, r2 = ran.split("#")
-            cal_db_interface = ":".join(
-                [prot, serv, str(np.random.randint(int(r1), int(r2)))])
+        # if "#" in cal_db_interface:
+        #    prot, serv, ran = cal_db_interface.split(":")
+        #    r1, r2 = ran.split("#")
+        #    cal_db_interface = ":".join(
+        #        [prot, serv, str(np.random.randint(int(r1), int(r2)))])
 
         dinstance = getattr(Detectors, self.cal_det_instance)
-        offset = get_constant_from_db(getattr(dinstance, qm),
-                                      Constants.AGIPD.Offset(),
-                                      Conditions.Dark.AGIPD(
-                                          memory_cells=max_cells_db_dark,
-                                          bias_voltage=bias_voltage),
-                                      np.zeros((128, 512, max_cells_db, 3)),
-                                      cal_db_interface,
-                                      creation_time=creation_time,
-                                      timeout=timeout_cal_db)
+        offset, when = get_constant_from_db_and_time(
+            getattr(dinstance, qm),
+            Constants.AGIPD.Offset(),
+            Conditions.Dark.AGIPD(memory_cells=max_cells_db_dark,
+                                  bias_voltage=bias_voltage),
+            np.zeros((128, 512, max_cells_db, 3)),
+            cal_db_interface,
+            creation_time=creation_time,
+            timeout=timeout_cal_db)
 
         noise = get_constant_from_db(getattr(dinstance, qm),
                                      Constants.AGIPD.Noise(),
@@ -1379,8 +1442,9 @@ class AgipdCorrections:
         # we have all we need for dark image derived constants
         # initialize and return
         if only_dark:
-            self.initialize(offset, None, bpixels, noise, thresholds, None)
-            return
+            self.initialize(offset, None, bpixels, noise, thresholds, None,
+                            slopesPC=None)
+            return when
 
         # load also non-dark constants from the database
         slopesPC = get_constant_from_db(getattr(dinstance, qm),
@@ -1407,7 +1471,8 @@ class AgipdCorrections:
                                                               photon_energy,
                                                               pixels_x=512,
                                                               pixels_y=128,
-                                                              beam_energy=None),  # noqa
+                                                              beam_energy=None),  # noqa
                                  np.ones((128, 512, max_cells_db, 2)),
                                  cal_db_interface,
                                  creation_time=creation_time,
@@ -1436,9 +1501,11 @@ class AgipdCorrections:
         if slopesFF.shape[2] > 32:
             xraygain = np.nanmedian(slopesFF[..., 32:min(96, self.max_cells)],
                                     axis=2)
-        else:
+        elif slopesFF.shape[2] > 2:
             xraygain = np.nanmedian(slopesFF[..., :min(96, self.max_cells)],
                                     axis=2)
+        else:
+            xraygain = np.squeeze(slopesFF[..., 0])
 
         # relative X-ray gain is then each pixels gain, by the median gain
         # of all pixels
@@ -1486,7 +1553,8 @@ class AgipdCorrections:
                                                                  photon_energy,
                                                                  pixels_x=512,
                                                                  pixels_y=128,
-                                                                 beam_energy=None),  # noqa
+                                                                 beam_energy=None),  # noqa
                                     np.zeros((128, 512, max_cells_db)),
                                     cal_db_interface,
                                     creation_time=creation_time,
@@ -1495,7 +1563,9 @@ class AgipdCorrections:
         bpixels |= bpff.astype(np.uint32)[..., :bpixels.shape[2], None]
 
         self.initialize(offset, rel_gain, bpixels, noise, thresholds,
-                        base_offset)
+                        base_offset, slopesPC=slopesPC)
+
+        return when
 
     def initialize_from_file(self, filename, qm, with_dark=True):
         """ Initialize calibration constants from a calibration file
@@ -1570,13 +1640,17 @@ class AgipdCorrections:
         with h5py.File(filename, "r") as calfile:
 
             bpixels = calfile["{}/{}/data".format(qm, "BadPixelsFF")][()]
-            bpixels |= np.moveaxis(
-                calfile["{}/{}/data".format(qm, "BadPixelsPC")][()], 0, 2)
+            try:
+                bpixels |= np.moveaxis(
+                    calfile["{}/{}/data".format(qm, "BadPixelsPC")][()], 0, 2)
+            except KeyError:
+                # older calibration files may not contain BadPixelsPC
+                pass
             bpixels = bpixels[..., None]  # without gain dimension up to here
             if with_dark:
                 offsets = calfile["{}/{}/data".format(qm, "Offset")][()]
                 noises = calfile["{}/{}/data".format(qm, "Noise")][()]
-                thresholds = calfile["{}/{}/data".format(qm, "ThresholdsDark")][()]  # noqa
+                thresholds = calfile[
+                    "{}/{}/data".format(qm, "ThresholdsDark")][()]
                 bpixels |= calfile["{}/{}/data".format(qm, "BadPixelsDark")][
                     ()]
 
@@ -1597,9 +1671,11 @@ class AgipdCorrections:
                 slopesFF = slopesFF[..., 0]
             if slopesFF.shape[2] > 32:
                 xraygain = np.nanmedian(slopesFF[..., 32:], axis=2)
-            else:
+            elif slopesFF.shape[2] > 2:
                 xraygain = np.nanmedian(
                     slopesFF[..., :min(96, self.max_cells)], axis=2)
+            else:
+                xraygain = np.squeeze(slopesFF[..., 0])
 
             xraygain /= np.nanmedian(xraygain)
             pcrel = pc_high_m / np.nanmedian(pc_high_m, axis=2)[..., None]
@@ -1614,4 +1690,4 @@ class AgipdCorrections:
                            slopesPC[..., :self.max_cells, 4], xrayOffset]
 
         self.initialize(offsets, rel_gain, bpixels, noises, thresholds,
-                        base_offset)
+                        base_offset, slopesPC=slopesPC)
diff --git a/cal_tools/cal_tools/enums.py b/cal_tools/cal_tools/enums.py
index 0e30568a4683839b1c171d6d8b1bb28611db26c5..4cd37c94679ffcb93a06f4e4e83f729516e5e3de 100644
--- a/cal_tools/cal_tools/enums.py
+++ b/cal_tools/cal_tools/enums.py
@@ -5,24 +5,22 @@ class BadPixels(Enum):
     """ The European XFEL Bad Pixel Encoding
     """
         
-    OFFSET_OUT_OF_THRESHOLD  = 0b00000000000000000001 # bit 1
-    NOISE_OUT_OF_THRESHOLD   = 0b00000000000000000010 # bit 2
-    OFFSET_NOISE_EVAL_ERROR  = 0b00000000000000000100 # bit 3
-    NO_DARK_DATA             = 0b00000000000000001000 # bit 4
-    CI_GAIN_OF_OF_THRESHOLD  = 0b00000000000000010000 # bit 5
-    CI_LINEAR_DEVIATION      = 0b00000000000000100000 # bit 6
-    CI_EVAL_ERROR            = 0b00000000000001000000 # bit 7
-    FF_GAIN_EVAL_ERROR       = 0b00000000000010000000 # bit 8
-    FF_GAIN_DEVIATION        = 0b00000000000100000000 # bit 9
-    FF_NO_ENTRIES            = 0b00000000001000000000 # bit 10
-    CI2_EVAL_ERROR           = 0b00000000010000000000 # bit 11
-    VALUE_IS_NAN             = 0b00000000100000000000 # bit 12
-    VALUE_OUT_OF_RANGE       = 0b00000001000000000000 # bit 13
-    GAIN_THRESHOLDING_ERROR  = 0b00000010000000000000 # bit 14
-    DATA_STD_IS_ZERO         = 0b00000100000000000000 # bit 15
-    ASIC_STD_BELOW_NOISE     = 0b00001000000000000000 # bit 16
-    INTERPOLATED             = 0b00010000000000000000 # bit 17
-    NOISY_ADC                = 0b00100000000000000000 # bit 18
-    OVERSCAN                 = 0b01000000000000000000 # bit 19
-    NON_SENSITIVE            = 0b10000000000000000000 # bit 20
+    OFFSET_OUT_OF_THRESHOLD  = 0b000000000000000001  # bit 1
+    NOISE_OUT_OF_THRESHOLD   = 0b000000000000000010  # bit 2
+    OFFSET_NOISE_EVAL_ERROR  = 0b000000000000000100  # bit 3
+    NO_DARK_DATA             = 0b000000000000001000  # bit 4
+    CI_GAIN_OF_OF_THRESHOLD  = 0b000000000000010000  # bit 5
+    CI_LINEAR_DEVIATION      = 0b000000000000100000  # bit 6
+    CI_EVAL_ERROR            = 0b000000000001000000  # bit 7
+    FF_GAIN_EVAL_ERROR       = 0b000000000010000000  # bit 8
+    FF_GAIN_DEVIATION        = 0b000000000100000000  # bit 9
+    FF_NO_ENTRIES            = 0b000000001000000000  # bit 10
+    CI2_EVAL_ERROR           = 0b000000010000000000  # bit 11
+    VALUE_IS_NAN             = 0b000000100000000000  # bit 12
+    VALUE_OUT_OF_RANGE       = 0b000001000000000000  # bit 13
+    GAIN_THRESHOLDING_ERROR  = 0b000010000000000000  # bit 14
+    DATA_STD_IS_ZERO         = 0b000100000000000000  # bit 15
+    ASIC_STD_BELOW_NOISE     = 0b001000000000000000  # bit 16
+    INTERPOLATED             = 0b010000000000000000  # bit 17
+    NOISY_ADC                = 0b100000000000000000  # bit 18
 
diff --git a/cal_tools/cal_tools/lpdlib.py b/cal_tools/cal_tools/lpdlib.py
index 5c1a0fb6b8f390400c3fa9993227aca3cc72128b..ab6fc0d5066d4e2aea642f44e857b63ba9f47fba 100644
--- a/cal_tools/cal_tools/lpdlib.py
+++ b/cal_tools/cal_tools/lpdlib.py
@@ -2,8 +2,8 @@ import copy
 
 import h5py
 import numpy as np
-from cal_tools.tools import get_constant_from_db
 from cal_tools.enums import BadPixels
+from cal_tools.tools import get_constant_from_db, get_constant_from_db_and_time
 from iCalibrationDB import Constants, Conditions, Detectors
 
 
@@ -41,7 +41,7 @@ class LpdCorrections:
                  raw_fmt_version=2, chunk_size=512,
                  h5_data_path="INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/",
                  h5_index_path="INDEX/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/",
-                 do_ff=True, correct_non_linear=False, karabo_data_mode=False):
+                 do_ff=True, correct_non_linear=True, karabo_data_mode=False):
         """
         Initialize an LpdCorrections Class
 
@@ -85,24 +85,19 @@ class LpdCorrections:
         self.bins_signal_high_range = bins_signal_high_range
         self.cidx = 0
         self.do_ff = do_ff
-        filter_modules = [11]
+        filter_modules = []
         self.filter_cells = [0, 1] if channel in filter_modules else []
-        self.cnl = correct_non_linear
+        self.cnl = correct_non_linear
         self.karabo_data_mode = karabo_data_mode
         # empirically determined from APD datasets p900038, r155,r156
-        self.cnl_const = {'med': 
-                          {'lin': (0.10960777594700886,
-                                   859.4778021212404),
-                           'repo': (0.11046365063379418,
-                                    751.5109159229758,
-                                    1436280.868413923,
-                                    9966.92701736398)},
-                          'high': {'sigm': (1333.6836067255365,
-                                            8370.936473950766,
-                                            0.003912416547243528,
-                                            0.00013306099710934553),
-                                   'lin': (0.16284255864598732,
-                                           -29.830797125784763)}}
+        self.cnl_const = {
+            'high': {'A': -0.000815934, 'lam': 0.00811281, 'c': 1908.89,
+                     'm': 0, 'b': 0},  # noqa
+            'med': {'A': 0.0999894, 'lam': -0.00137652, 'c': 3107.83,
+                    'm': 3.89982e-06, 'b': -0.116811},  # noqa
+            'low': {'A': 0.0119132, 'lam': -0.0002, 'c': 36738.6,
+                    'm': 2.00273e-08, 'b': 0.245537}}  # noqa
 
     def get_iteration_range(self):
         """Returns a range expression over which to iterate in chunks
@@ -157,7 +152,7 @@ class LpdCorrections:
             if not hasattr(self, "mask"):
                 self.mask = mask
             else:
-                self.mask |= mask
+                self.mask |= mask[:self.mask.shape[0], ...]
         if noise is not None:
             self.noise = noise
         if flatfield is not None:
@@ -222,7 +217,6 @@ class LpdCorrections:
             cells = np.squeeze(irange['image.cellId'])
             length = np.squeeze(irange['image.length'])
 
-
         # split gain and image info into separate arrays
         im, gain = self.split_gain(im[:, 0, ...])
 
@@ -269,35 +263,30 @@ class LpdCorrections:
             im /= self.flatfield[None, :, :]
 
         # hacky way of smoothening transition region between med and low
-        #im[gain == 2] -= im[gain == 2] * cfac[gain == 2]
-        
+        # im[gain == 2] -= im[gain == 2] * cfac[gain == 2]
+
         # perform non-linear corrections if requested
         if self.cnl:
-            largs = self.cnl_const['high']['lin']
-            sargs = self.cnl_const['high']['sigm']
-            n, c, b, k = sargs
-            y = im[(gain==0) & (im > 1000)]
-            x = (-np.arctanh(b-y/n+1)+c*k)/k
-            dy = largs[0]*x+largs[1] - n*(1+b+np.tanh((x-c)*k))            
-            im[(gain==0) & (im > 1000)] += dy
-            
-            mhg = largs[0]
-            bhg = largs[1]
-            
-            largs = self.cnl_const['med']['lin']
-            rargs = self.cnl_const['med']['repo']
-            c, k, j, n = rargs
-            j *= -1
-            mf = mhg/largs[0]
-            
-            y = im[(gain==1)]
-            x = (np.sqrt(c**2*n**2+4*c*j+2*c*n*(k-y)+(k-y)**2)+c*n-k+y)/(2*c) - (bhg-largs[1])/largs[0]          
-            dy = (mf*largs[0]-c)*x + largs[1] - k + j/(x-n)
-            
-            im[(gain==1)] += (bhg-largs[1]) + dy 
-            
-            #im[(gain==1) & (im >= 3500)] *= mf
-            #im[(gain==1) & (im >= 3500)] += (bhg-largs[1])
+            def lin_exp_fun(x, m, b, A, lam, c):
+                return m * x + b + A * np.exp(lam * (x - c))
+
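+            # the relative non-linearity of each gain stage is modelled as
+            # a linear term plus an exponential; the resulting correction
+            # fraction is clamped before being subtracted from the signal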
+            x = im[(gain == 0)]
+            cnl = self.cnl_const['high']
+            cf = lin_exp_fun(x, cnl['m'], cnl['b'], cnl['A'], cnl['lam'],
+                             cnl['c'])
+            im[(gain == 0)] -= np.maximum(cf, -0.2) * x
+
+            x = im[(gain == 1)]
+            cnl = self.cnl_const['med']
+            cf = lin_exp_fun(x, cnl['m'], cnl['b'], cnl['A'], cnl['lam'],
+                             cnl['c'])
+            im[(gain == 1)] -= np.minimum(cf, 0.2) * x
+
+            x = im[(gain == 2)]
+            cnl = self.cnl_const['low']
+            cf = lin_exp_fun(x, cnl['m'], cnl['b'], cnl['A'], cnl['lam'],
+                             cnl['c'])
+            im[(gain == 2)] -= np.minimum(cf, 0.45) * x
 
         # create bad pixels masks, here non-finite values
         bidx = ~np.isfinite(im)
@@ -378,11 +367,18 @@ class LpdCorrections:
             status = np.squeeze(self.infile[lpd_base + "image/status"])
             if np.count_nonzero(status != 0) == 0:
                 raise IOError("File {} has no valid counts".format(
-                               self.infile))
+                    self.infile))
             last = np.squeeze(self.infile[lpd_base + "image/last"])
             valid = status != 0
-            last_index = int(last[status != 0][-1])
-            first_index = int(last[status != 0][0])
+
+            idxtrains = np.squeeze(self.infile["/INDEX/trainId"])
+            medianTrain = np.nanmedian(idxtrains)
+            lowok = (idxtrains > medianTrain - 1e4)
+            highok = (idxtrains < medianTrain + 1e4)
+            valid &= lowok & highok
+
+            last_index = int(last[valid][-1])
+            first_index = int(last[valid][0])
         else:
             raise AttributeError(
                 "Not a known raw format version: {}".format(self.index_v))
@@ -457,8 +453,32 @@ class LpdCorrections:
         self.infile.visititems(visitor)
         # sanitize indices
         for do in ["image", ]:
-            uq, fidx, cnts = np.unique(alltrains[firange], return_index=True,
-                                       return_counts=True)
+            uq, fidxv, cntsv = np.unique(alltrains[firange - firange[0]],
+                                         return_index=True,
+                                         return_counts=True)
+
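+            # trains can be missing from the data: pad the first/count
+            # arrays with zero entries for each missing train ID, treating
+            # jumps of more than 1000 trains as a single gap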
+            duq = (uq[1:] - uq[:-1]).astype(np.int64)
+
+            cfidxv = [fidxv[0], ]
+            ccntsv = [cntsv[0], ]
+            for i, du in enumerate(duq.tolist()):
+                if du > 1000:
+                    du = 1
+                    cntsv[i] = 0
+                cfidxv += [0] * (du - 1) + [fidxv[i + 1], ]
+                ccntsv += [0] * (du - 1) + [cntsv[i + 1], ]
+
+            mv = len(cfidxv)
+            fidx = np.zeros(len(cfidxv), fidxv.dtype)
+            fidx[self.valid[:mv]] = np.array(cfidxv)[self.valid[:mv]]
+
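+            # back-fill zeroed first-index entries with the next valid
+            # index so that the index stays monotonic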
+            for i in range(len(fidx) - 1, 2, -1):
+                if fidx[i - 1] == 0 and fidx[i] != 0:
+                    fidx[i - 1] = fidx[i]
+
+            cnts = np.zeros(len(cfidxv), cntsv.dtype)
+            cnts[self.valid[:mv]] = np.array(ccntsv)[self.valid[:mv]]
+
             self.outfile.create_dataset(idx_base + "{}/first".format(do),
                                         fidx.shape,
                                         dtype=fidx.dtype,
@@ -585,23 +605,24 @@ class LpdCorrections:
             (cal_db_interface, creation_time, max_cells_db, capacitor,
              bias_voltage, photon_energy, timeout) = dbparms
 
-        if "#" in cal_db_interface:
-            prot, serv, ran = cal_db_interface.split(":")
-            r1, r2 = ran.split("#")
-            cal_db_interface = ":".join(
-                [prot, serv, str(np.random.randint(int(r1), int(r2)))])
-            print("Connecting to interface: {}".format(cal_db_interface))
-
-        offset = get_constant_from_db(getattr(Detectors.LPD1M1, qm),
-                                      Constants.LPD.Offset(),
-                                      Conditions.Dark.LPD(
-                                          memory_cells=max_cells_db,
-                                          bias_voltage=bias_voltage,
-                                          capacitor=capacitor),
-                                      np.zeros((256, 256, max_cells_db, 3)),
-                                      cal_db_interface,
-                                      creation_time=creation_time,
-                                      timeout=timeout)
+        # if "#" in cal_db_interface:
+        #    prot, serv, ran = cal_db_interface.split(":")
+        #    r1, r2 = ran.split("#")
+        #    cal_db_interface = ":".join(
+        #        [prot, serv, str(np.random.randint(int(r1), int(r2)))])
+        #    print("Connecting to interface: {}".format(cal_db_interface))
+
+        offset, when = get_constant_from_db_and_time(
+            getattr(Detectors.LPD1M1, qm),
+            Constants.LPD.Offset(),
+            Conditions.Dark.LPD(
+                memory_cells=max_cells_db,
+                bias_voltage=bias_voltage,
+                capacitor=capacitor),
+            np.zeros((256, 256, max_cells_db, 3)),
+            cal_db_interface,
+            creation_time=creation_time,
+            timeout=timeout)
 
         noise = get_constant_from_db(getattr(Detectors.LPD1M1, qm),
                                      Constants.LPD.Noise(),
@@ -628,7 +649,7 @@ class LpdCorrections:
         # done loading constants and returning
         if only_dark:
             self.initialize(offset, None, None, bpixels, noise, None)
-            return
+            return when
 
         slopesCI = get_constant_from_db(getattr(Detectors.LPD1M1, qm),
                                         Constants.LPD.SlopesCI(),
@@ -653,7 +674,8 @@ class LpdCorrections:
                                                             pixels_x=256,
                                                             pixels_y=256,
                                                             beam_energy=None,
-                                                            capacitor=capacitor),  # noqa
+                                                            capacitor=capacitor),  # noqa
                                  np.ones((256, 256)),
                                  cal_db_interface,
                                  creation_time=creation_time,
@@ -685,6 +707,7 @@ class LpdCorrections:
         bpixels |= bpix[..., None]
         self.initialize(offset, rel_gains, rel_gain_offset, bpixels, noise,
                         flat_fields)
+        return when
 
     def initialize_from_file(self, filename, qm, with_dark=True):
         """ Initialize calibration constants from a calibration file
diff --git a/cal_tools/cal_tools/tools.py b/cal_tools/cal_tools/tools.py
index 2e8961c002d7685df06ef97a0671f0ad6db4ece2..a8517ba60298f236724f4b3b6370a48b5837620a 100644
--- a/cal_tools/cal_tools/tools.py
+++ b/cal_tools/cal_tools/tools.py
@@ -1,61 +1,42 @@
+import glob
+import os
+import re
+import shutil
+import subprocess
+import textwrap
 from collections import OrderedDict
-from glob import glob
-from importlib.machinery import SourceFileLoader
-from os import chdir, listdir, makedirs, path, remove, stat
-from os.path import isdir, isfile, splitext
+from os.path import isfile, isdir, splitext
 from queue import Queue
-from shutil import copy, copytree, move, rmtree
-from subprocess import CalledProcessError, check_call, check_output
-from textwrap import dedent
+from subprocess import check_output
 from time import sleep
 
+import numpy as np
 from jinja2 import Template
-import tabulate
-
-from xfel_calibrate import settings
-
-
-def atoi(text):
-    """
-    Convert string to integer is possible
-
-    :param text: string to be converted
-    :return: integer value or input string
-    """
-
-    return int(text) if text.isdigit() else text
-
-
-def natural_keys(text):
-    """
-    Decompose string to list of integers and sub-strings
-    """
-    return [atoi(c) for c in re.split(r'(\d+)', text)]
 
 
 def combine_report(run_path, calibration):
-    sphinx_path = "{}/sphinx_rep".format(path.abspath(run_path))
-    makedirs(sphinx_path)
-    direntries = listdir(run_path)
-    direntries.sort(key=natural_keys)
-    
+    sphinx_path = "{}/sphinx_rep".format(os.path.abspath(run_path))
+    os.makedirs(sphinx_path)
+    direntries = os.listdir(run_path)
+    direntries.sort()
+
     for entry in direntries:
-        
+
         if isfile("{}/{}".format(run_path, entry)):
             name, ext = splitext("{}".format(entry))
-            
+
             if ext == ".rst":
-                comps = name.split("__")                
+                comps = name.split("__")
                 if len(comps) >= 3:
-                    group, name_param, conc_param = "_".join(comps[:-2]), comps[-2], comps[-1]
+                    group, name_param, conc_param = "_".join(comps[:-2]), \
+                                                    comps[-2], comps[-1]
                 else:
-                    group, name_param,  conc_param = comps[0], "None", "None"
+                    group, name_param, conc_param = comps[0], "None", "None"
 
-                with open("{}/{}.rst".format(sphinx_path, group), "a") as gfile:
+                with open("{}/{}.rst".format(sphinx_path, group),
+                          "a") as gfile:
                     if conc_param != "None":
-                        title = "{}. {} = {}".format(calibration, name_param, conc_param)
+                        title = "{}. {} = {}".format(calibration, name_param,
+                                                     conc_param)
                         gfile.write(title + "\n")
-                        gfile.write( "=" *len (title) + "\n")
+                        gfile.write("=" * len(title) + "\n")
                         gfile.write("\n")
                     with open("{}/{}".format(run_path, entry), "r") as ifile:
                         skip_next = False
@@ -65,172 +46,69 @@ def combine_report(run_path, calibration):
                                 continue
                             if conc_param == "None":
                                 gfile.write(line)
-                            elif " ".join(calibration.split()) != " ".join(line.split()):
+                            elif " ".join(calibration.split()) != " ".join(
+                                    line.split()):
                                 gfile.write(line)
                             else:
                                 skip_next = True
-                                
+
                     gfile.write("\n\n")
         if isdir("{}/{}".format(run_path, entry)):
-            copytree("{}/{}".format(run_path, entry), "{}/{}".format(sphinx_path, entry))
+            shutil.copytree("{}/{}".format(run_path, entry),
+                            "{}/{}".format(sphinx_path, entry))
     return sphinx_path
 
 
-def prepare_plots(run_path, threshold=1000000):
-    """
-    Convert svg file to pdf or png to be used for latex
-
-    Conversion of svg to vector graphics pdf is performed using svglib3.
-    This procedure is CPU consuming. In order to speed up process
-    large svg files are converted to png format.
-
-    The links in the rst files are adapted accordingly to the
-    converted image files.
-
-    :param run_path: Run path of the slurm job
-    :param threshold: Max svg file size (in bytes) to be converted to pdf
-    """
-    print('Convert svg to pdf and png')
-    run_path = path.abspath(run_path)
-
-    rst_files = glob('{}/*rst'.format(run_path))
-    for rst_file in rst_files:
-        rst_file_name = path.basename(rst_file)
-        rst_file_name = path.splitext(rst_file_name)[0]
-
-        svg_files = glob(
-            '{}/{}_files/*svg'.format(run_path, rst_file_name))
-        for f_path in svg_files:
-            f_name = path.basename(f_path)
-            f_name = path.splitext(f_name)[0]
-
-            if (stat(f_path)).st_size < threshold:
-                check_call(["svg2pdf", "{}".format(f_path)], shell=False)
-                new_ext = 'pdf'
-            else:
-                check_call(["convert", "{}".format(f_path),
-                            "{}.png".format(f_name)], shell=False)
-                new_ext = 'png'
-
-            check_call(["sed",
-                        "-i",
-                        "s/{}.svg/{}.{}/g".format(f_name, f_name, new_ext),
-                        rst_file],
-                       shell=False)
-
-
-def make_timing_summary(run_path, joblist):
-    """
-    Create an rst file with timing summary of executed slurm jobs
-
-    :param run_path: Run path of the slurm job
-    :param joblist: List of slurm jobs
-    """
-    print('Prepare timing summary')
-    run_path = path.abspath(run_path)
-    pars_vals = []
-    pars = 'JobID,Elapsed,Suspended'
-    pars_name = pars.split(',')
-
-    for job in joblist:
-        out = check_output(['sacct', '-j', job,
-                            '--format={}'.format(pars)],
-                           shell=False)
-        lines = str(out).split('\\n')
-
-        # loop over output lines, skip first two lines with header.
-        for line in lines[2:]:
-            s = line.split()
-            if len(s) == len(pars_name):
-                pars_vals.append(s)
-                break
-
-    tmpl = Template('''
-                    Timing summary
-                    ==============
-                    
-                    .. math::
-                        {% for line in table %}
-                        {{ line }}
-                        {%- endfor %}
-                    ''')
-
-    with open("{}/timing_summary.rst".format(run_path), "w+") as gfile:
-
-        table = tabulate.tabulate(pars_vals, tablefmt='latex',
-                                  headers=pars_name)
-        gfile.write(dedent(tmpl.render(table=table.split('\n'))))
-
-
 def make_report(run_path, tmp_path, out_path, project, author, version,
                 report_to):
-    """
-    Create calibration report (pdf file)
-
-    Automatically generated report document results, produced by
-    jupyter-notebooks.
-
-    :param run_path: Path to sphinx run directory
-    :param tmp_path: Run path of the slurm job
-    :param out_path: Output directory for report.
-    Overwritten if path to report is given in `report_to`
-    :param project: Project title
-    :param author: Author of the notebook
-    :param version: Version of the notebook
-    :param report_to: Name or path of the report file
-    """
-    run_path = path.abspath(run_path)
-    report_path, report_name = path.split(report_to)
+    run_path = os.path.abspath(run_path)
+    report_path, report_name = os.path.split(report_to)
     if report_path != '':
         out_path = report_path
 
     try:
-        check_call(["sphinx-quickstart",
-                    "--quiet",
-                    "--project='{}'".format(project),
-                    "--author='{}'".format(author),
-                    "-v", str(version),
-                    "--suffix=.rst",
-                    "--master=index",
-                    "--ext-intersphinx",
-                    "--ext-mathjax",
-                    "--makefile",
-                    "--no-batchfile", run_path])
-
-    except CalledProcessError:
-        raise Exception("Failed to run sphinx-quickbuild. Is sphinx installed?"
-                        "Generated simple index.rst instead")
-
-    # quickbuild went well we need to edit the index.rst and conf.py files
-    module_path = "{}".format(path.abspath(path.dirname(__file__)))
-
-    conf = SourceFileLoader("conf",
-                            "{}/conf.py".format(run_path)).load_module()
-    l_var = [v for v in dir(conf) if not v.startswith('__')]
-
-    with open("{}/conf.py.tmp".format(run_path), "w") as mf:
-        latex_elements = {'extraclassoptions': ',openany, oneside',
-                          'maketitle': r'\input{titlepage.tex.txt}'}
-        mf.write("latex_elements = {}\n".format(latex_elements))
-        mf.write("latex_logo = '{}/{}'\n".format(module_path,
-                                                 settings.logo_path))
-        mf.write("latex_additional_files = ['titlepage.tex.txt']\n")
+        subprocess.check_call(["sphinx-quickstart",
+                               "--quiet",
+                               "--project='{}'".format(project),
+                               "--author='{}'".format(author),
+                               "-v", str(version),
+                               "--suffix=.rst",
+                               "--master=index",
+                               "--ext-intersphinx",
+                               "--ext-mathjax",
+                               "--makefile",
+                               "--no-batchfile", run_path])
+        # conf.py now exists in the temp folder; patch in latex options
+        # that remove empty pages
+        patch_path = os.path.abspath(os.path.dirname(__file__))
+        patch_file = "{}/conf_latex.patch".format(patch_path)
+        try:
+            subprocess.check_call(["patch",
+                                   "{}/sphinx_rep/conf.py".format(tmp_path),
+                                   patch_file])
+        except (subprocess.CalledProcessError, OSError):
+            print("Patching the latex conf.py failed")
 
-        for var in l_var:
-            if var in ['latex_elements', 'latex_logo',
-                       'latex_additional_files']:
-                continue
-            tmpl = '{} = {}\n'
-            v = getattr(conf, var, None)
-            if isinstance(v, str):
-                tmpl = '{} = "{}"\n'
+        try:
 
-            mf.write(tmpl.format(var, v))
+            subprocess.check_call(["sed",
+                                   "-i",
+                                   "-e",
+                                   "s/{}.tex/{}.tex/g".format(
+                                       project.replace(" ", ""), report_name),
+                                   "{}/sphinx_rep/conf.py".format(tmp_path)],
+                                  shell=False)
+        except (subprocess.CalledProcessError, OSError):
+            print("Setting the tex output name via sed failed")
+
+    except subprocess.CalledProcessError:
+        raise Exception("Failed to run sphinx-quickbuild. Is sphinx installed?"
+                        "Generated simple index.rst instead")
 
-    remove("{}/conf.py".format(run_path))
-    move("{}/conf.py.tmp".format(run_path), "{}/conf.py".format(run_path))
+    # quickstart went well; now edit the generated index file
 
-    direntries = listdir(run_path)
+    direntries = os.listdir(run_path)
     files_to_handle = []
     for entry in direntries:
         if isfile("{}/{}".format(run_path, entry)):
@@ -238,57 +116,51 @@ def make_report(run_path, tmp_path, out_path, project, author, version,
             if ext == ".rst" and "index" not in name:
                 files_to_handle.append(name.strip())
 
-    index_tmp = Template('''
-                        Calibration report
-                        ==================
-                        
-                        .. toctree::
-                           :maxdepth: 2
-                           {% for k in keys %}
-                           {{ k }}
-                           {%- endfor %}
-                        ''')
-
-    with open("{}/index.rst".format(run_path), "w+") as mf:
-        mf.write(dedent(index_tmp.render(keys=files_to_handle)))
+    with open("{}/index.rst.tmp".format(run_path), "w") as mf:
+        with open("{}/index.rst".format(run_path), "r") as mfr:
+            indexTmp = Template('''
+                .. toctree::
+                   :maxdepth: 2
+                   {% for k in keys %}
+                   {{ k }}
+                   {%- endfor %}
+                ''')
+            for line in mfr:
+                line = line.replace(".. toctree::", textwrap.dedent(
+                    indexTmp.render(keys=files_to_handle)))
+                line = line.replace(":maxdepth: 2", "")
+                line = line.replace("Documentation", "Calibration")
+                if ":caption." in line:
+                    continue
+                mf.write(line)
+
+    os.remove("{}/index.rst".format(run_path))
+    move("{}/index.rst.tmp".format(run_path), "{}/index.rst".format(run_path))
 
     # finally call the make scripts
-    chdir(run_path)
+
+    os.chdir(run_path)
     try:
-        check_call(["make", "latexpdf"])
+        subprocess.check_call(["make", "latexpdf"])
 
-    except CalledProcessError:
+    except subprocess.CalledProcessError:
         print("Failed to make pdf documentation")
         print("Temp files will not be deleted and " +
               "can be inspected at: {}".format(run_path))
         return
     print("Moving report to final location: {}".format(out_path))
-    for f in glob(r'{}/_build/latex/*.pdf'.format(run_path)):
-        copy(f, out_path)
+    os.makedirs(out_path, exist_ok=True)
+    for f in glob.glob(r'{}/_build/latex/*.pdf'.format(run_path)):
+        print(f)
+        shutil.copy(f, out_path + "/")
     print("Removing temporary files at: {}".format(tmp_path))
-    rmtree(tmp_path)
+    shutil.rmtree(tmp_path)
 
-    
-def make_titlepage(sphinx_path, project, data_path):
-    """
-    Create title page for report using template
 
-    :param sphinx_path: path to sphinx run directory
-    :param project: title of the project
-    :param data_path: path to input data sample used for notebook
-    """
-    module_path = "{}".format(path.abspath(path.dirname(__file__)))
-    with open('{}/titlepage.tmpl'.format(module_path)) as file_:
-        title_tmp = Template(file_.read())
-
-    with open("{}/titlepage.tex.txt".format(sphinx_path), "w+") as mf:
-        mf.write(dedent(title_tmp.render(project=project,
-                                         data_path=data_path)))
-
-
-def finalize(joblist, finaljob, run_path, out_path, project, calibration,
-             author, version, report_to, data_path='Unknown'):
-    
+def finalize(joblist, run_path, out_path, project, calibration, author,
+             version, report_to):
     print("Waiting on jobs to finish: {}".format(joblist))
     while True:
         found_jobs = set()
@@ -300,11 +172,7 @@ def finalize(joblist, finaljob, run_path, out_path, project, calibration,
         if len(found_jobs) == 0:
             break
         sleep(10)
-
-    prepare_plots(run_path)
-    make_timing_summary(run_path, joblist+[str(finaljob)])
     sphinx_path = combine_report(run_path, calibration)
-    make_titlepage(sphinx_path, project, data_path)
     make_report(sphinx_path, run_path, out_path, project, author, version,
                 report_to)
 
@@ -321,13 +189,13 @@ def parse_runs(runs, return_type=str):
     elif isinstance(runs, (list, tuple)):
         pruns = runs
     else:
-        pruns = [runs,]
-        
+        pruns = [runs, ]
+
     if return_type is str:
         return ["r{:04d}".format(r) for r in pruns]
     else:
         return pruns
-                
+
 
 def run_prop_seq_from_path(filename):
     run = re.findall(r'.*r([0-9]{4}).*', filename)
@@ -355,37 +223,41 @@ def map_modules_from_files(filelist, file_inset, quadrants, modules_per_quad):
                 if file_infix in file:
                     module_files[name].put(file)
                     total_sequences += 1
-                    total_file_size += path.getsize(file)
-                    
+                    total_file_size += os.path.getsize(file)
+
     return module_files, mod_ids, total_sequences, total_file_size
 
 
-def gain_map_files(in_folder, runs, sequences, file_inset, quadrants, modules_per_quad):
+def gain_map_files(in_folder, runs, sequences, file_inset, quadrants,
+                   modules_per_quad):
     total_sequences = 0
     total_file_size = 0
     gain_mapped_files = OrderedDict()
     for gain, run in runs.items():
         ginfolder = "{}/{}".format(in_folder, run)
-        dirlist = listdir(ginfolder)
+        dirlist = os.listdir(ginfolder)
         file_list = []
         for entry in dirlist:
-            #only h5 file
+            # consider only .h5 files
             abs_entry = "{}/{}".format(ginfolder, entry)
-            if path.isfile(abs_entry) and path.splitext(abs_entry)[1] == ".h5":
+            if (os.path.isfile(abs_entry)
+                    and os.path.splitext(abs_entry)[1] == ".h5"):
 
                 if sequences is None:
                     file_list.append(abs_entry)
                 else:
                     for seq in sequences:
                         if "{:05d}.h5".format(seq) in abs_entry:
-                            file_list.append(path.abspath(abs_entry))
-                            
-        mapped_files, mod_ids, seq, fs = map_modules_from_files(file_list, file_inset,
-                                                                quadrants, modules_per_quad)
+                            file_list.append(os.path.abspath(abs_entry))
+
+        mapped_files, mod_ids, seq, fs = map_modules_from_files(file_list,
+                                                                file_inset,
+                                                                quadrants,
+                                                                modules_per_quad)
         total_sequences += seq
         total_file_size += fs
         gain_mapped_files[gain] = mapped_files
-    return gain_mapped_files, total_sequences, total_file_size/1e9
+    return gain_mapped_files, total_sequences, total_file_size / 1e9
 
 
 # from https://github.com/jupyter/notebook/issues/1000
@@ -405,6 +277,7 @@ try:  # Python 3
 except ImportError:  # Python 2
     import warnings
     from IPython.utils.shimmodule import ShimWarning
+
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", category=ShimWarning)
         from IPython.html.notebookapp import list_running_servers
@@ -427,105 +300,123 @@ def get_notebook_name():
                     return relative_path
     except:
         return os.environ.get("CAL_NOTEBOOK_NAME", "Unknown Notebook")
-    
-    
+
+
-def get_dir_creation_date(directory, run):
-    import os
-    import datetime
-    creation_time = os.stat("{}/r{:04d}".format(directory, run)).st_ctime
-    creation_time = datetime.datetime.fromtimestamp(creation_time)
-    return(creation_time)
-
-
 def get_dir_creation_date(directory, run):
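+    """Return the creation time of a run directory as a datetime.
+
+    The directory's modification time is used as a proxy for its
+    creation time.
+    """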
     import os
     import datetime
-    creation_time = os.stat("{}/r{:04d}".format(directory, run)).st_mtime 
+    creation_time = os.stat("{}/r{:04d}".format(directory, run)).st_mtime
     creation_time = datetime.datetime.fromtimestamp(creation_time)
-    return(creation_time)
+    return creation_time
 
 
-already_printed = {}
-def get_from_db(device, constant, condition, empty_constant,
-                         cal_db_interface, creation_time = None,
-                         verbosity=1, timeout=30000):
-    """
-    Return calibration constants and metadata requested from CalDB
-
-    :param device: Instance of detector
-    :param constant: Calibration constant known for given detector
-    :param condition: Calibration condition
-    :param empty_constant: Constant to be returned in case of failure.
-    :param cal_db_interface: Interface string, e.g. "tcp://max-exfl016:8015"
-    :param creation_time: Latest time for constant to be created
-    :param verbosity: Level of verbosity (0 - silent)
-    :param timeout: Timeout for zmq request
-    :return: Calibration constant, metadata
-    """
-    from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions
-    
+def get_random_db_interface(cal_db_interface):
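+    """Return a database interface, picking a random port from a range.
+
+    An interface such as ``tcp://max-exfl016:8015#8025`` yields a port
+    drawn from ``[8015, 8025)``; interfaces without a ``#`` range are
+    returned unchanged.
+    """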
+    if "#" in cal_db_interface:
+        prot, serv, ran = cal_db_interface.split(":")
+        r1, r2 = ran.split("#")
+        return ":".join([prot, serv, str(np.random.randint(int(r1), int(r2)))])
+    return cal_db_interface
+
+
+already_printed = {}
+
+
+def get_constant_from_db(device, constant, condition, empty_constant,
+                         cal_db_interface, creation_time=None,
+                         print_once=True, timeout=120000, ntries=120):
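+    """Return a calibration constant requested from the CalDB.
+
+    :param device: Instance of detector
+    :param constant: Calibration constant known for the given detector
+    :param condition: Calibration condition
+    :param empty_constant: Constant to be returned in case of failure
+    :param cal_db_interface: Interface string, e.g. "tcp://max-exfl016:8015";
+        a port range "tcp://max-exfl016:8015#8025" selects a random server
+    :param creation_time: Latest time for the constant to have been created
+    :param print_once: If True, print the injection time of each constant
+        only on its first retrieval
+    :param timeout: Timeout for each zmq request
+    :param ntries: Number of retries before giving up
+    :return: Calibration constant data
+    """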
+    from iCalibrationDB import ConstantMetaData, Versions
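+    # randomized delay so parallel jobs do not all hit the database at once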
+    sleep(np.random.randint(0, 60))
     if device:
-        metadata = ConstantMetaData()        
+        metadata = ConstantMetaData()
         metadata.calibration_constant = constant
         metadata.detector_condition = condition
         if creation_time is None:
             metadata.calibration_constant_version = Versions.Now(device=device)
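+            # try random interfaces from the range until a request succeeds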
+            while ntries > 0:
+                this_interface = get_random_db_interface(cal_db_interface)
+                try:
+                    metadata.retrieve(this_interface, timeout=timeout,
+                                      meta_only=True)
+                    break
+                except Exception:
+                    ntries -= 1
         else:
-            metadata.calibration_constant_version = Versions.Timespan(device=device,
-                                                                      start=creation_time)
-            creation_time = creation_time.isoformat()
-
-        try:
-            metadata.retrieve(cal_db_interface, when=creation_time, timeout=timeout)
-            if verbosity > 0:
-                if constant.name not in already_printed or verbosity > 1:
-                    already_printed[constant.name] = True
-                    print("{} was injected on: {}".format(constant.name,
-                                                          metadata.calibration_constant_version.begin_at))
-            return constant.data, metadata
-        except Exception as e:
-            if verbosity > 0:
-                print(e)
-            return empty_constant, metadata
+            metadata.calibration_constant_version = Versions.Timespan(
+                device=device,
+                start=creation_time)
+
+            while ntries > 0:
+                this_interface = get_random_db_interface(cal_db_interface)
+                try:
+                    metadata.retrieve(this_interface,
+                                      when=creation_time.isoformat(),
+                                      timeout=timeout, meta_only=True)
+                    break
+                except Exception:
+                    ntries -= 1
+        if ntries > 0:
+            if constant.name not in already_printed or not print_once:
+                already_printed[constant.name] = True
+                print("{} was injected on: {}".format(
+                    constant.name,
+                    metadata.calibration_constant_version.begin_at))
+            return constant.data
+        else:
+            return empty_constant
     else:
-        return empty_constant, None
+        return empty_constant
 
 
-def get_constant_from_db(device, constant, condition, empty_constant,
-                         cal_db_interface, creation_time=None,
-                         print_once=True, timeout=30000):
-    """
-    Return calibration constants requested from CalDB
-    """
-    data, _ = get_from_db(device, constant, condition, empty_constant,
-                               cal_db_interface, creation_time,
-                               int(print_once), timeout)
-
-    return data
-
-
-def tex_escape(text):
-    """
-    Escape latex special characters found in the text
+def get_constant_from_db_and_time(device, constant, condition, empty_constant,
+                                  cal_db_interface, creation_time=None,
+                                  print_once=True, timeout=120000, ntries=120):
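+    """Return a calibration constant and its injection time from the CalDB.
+
+    Takes the same parameters as ``get_constant_from_db``, but returns the
+    constant data together with the time it was injected into the
+    database, or ``(empty_constant, None)`` on failure.
+    """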
+    from iCalibrationDB import ConstantMetaData, Versions
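+    # randomized delay so parallel jobs do not all hit the database at once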
+    sleep(np.random.randint(0, 60))
+    if device:
+        metadata = ConstantMetaData()
+        metadata.calibration_constant = constant
+        metadata.detector_condition = condition
+        if creation_time is None:
+            metadata.calibration_constant_version = Versions.Now(device=device)
+            while ntries > 0:
+                this_interface = get_random_db_interface(cal_db_interface)
+                try:
+                    metadata.retrieve(this_interface, timeout=timeout,
+                                      meta_only=True)
+                    break
+                except Exception:
+                    ntries -= 1
 
-    :param text: a plain text message
-    :return: the message escaped to appear correctly in LaTeX
-    """
-    conv = {
-        '&': r'\&',
-        '%': r'\%',
-        '$': r'\$',
-        '#': r'\#',
-        '_': r'\_',
-        '{': r'\{',
-        '}': r'\}',
-        '~': r'\textasciitilde{}',
-        '^': r'\^{}',
-        '\\': r'\textbackslash{}',
-        '<': r'\textless{}',
-        '>': r'\textgreater{}',
-    }
-
-    key_list = sorted(conv.keys(), key=lambda item: - len(item))
-    regex = re.compile('|'.join(re.escape(str(key)) for key in key_list))
-    return regex.sub(lambda match: conv[match.group()], text)
+        else:
+            metadata.calibration_constant_version = Versions.Timespan(
+                device=device,
+                start=creation_time)
+            while ntries > 0:
+                this_interface = get_random_db_interface(cal_db_interface)
+                try:
+                    metadata.retrieve(this_interface,
+                                      when=creation_time.isoformat(),
+                                      timeout=timeout, meta_only=True)
+                    break
+                except Exception:
+                    ntries -= 1
+
+        if ntries > 0:
+            if constant.name not in already_printed or not print_once:
+                already_printed[constant.name] = True
+                print("{} was injected on: {}".format(
+                    constant.name,
+                    metadata.calibration_constant_version.begin_at))
+            return (constant.data,
+                    metadata.calibration_constant_version.begin_at)
+        else:
+            return empty_constant, None
+    else:
+        return empty_constant, None