diff --git a/cal_tools/cal_tools/agipdlib.py b/cal_tools/cal_tools/agipdlib.py
index 36d2d404454151a6540aea1e6cdaf2b45549bfc1..f9f6d7bd394b30cde7274c78339fb7ca9d19b0e5 100644
--- a/cal_tools/cal_tools/agipdlib.py
+++ b/cal_tools/cal_tools/agipdlib.py
@@ -76,7 +76,8 @@ class AgipdCorrections:
                  cal_det_instance="AGIPD1M1", karabo_data_mode=False,
                  force_hg_if_below=None, force_mg_if_below=None,
                  mask_noisy_adc=False, adjust_mg_baseline=False,
-                 acquisition_rate=None):
+                 acquisition_rate=None, dont_zero_nans=False,
+                 dont_zero_orange=False):
         """
         Initialize an AgipdCorrections Class
 
@@ -148,6 +149,9 @@ class AgipdCorrections:
         self.adjust_mg_baseline = adjust_mg_baseline
         self.mg_bl_adjust = 0
         self.acquisition_rate = acquisition_rate
+        self.dont_zero_nans = dont_zero_nans
+        self.dont_zero_orange = dont_zero_orange
+        self.valid_indices = None
 
     def get_iteration_range(self):
         """Returns a range expression over which to iterate in chunks
@@ -999,14 +1003,16 @@ class AgipdCorrections:
 
         # create a bad pixel mask for the data
         # we add any non-finite values to the mask
-        bidx = ~np.isfinite(im)
-        im[bidx] = 0
-        msk[bidx] |= BadPixels.VALUE_IS_NAN.value
+        if not self.dont_zero_nans:
+            bidx = ~np.isfinite(im)
+            im[bidx] = 0
+            msk[bidx] |= BadPixels.VALUE_IS_NAN.value
 
         # and such with have unrealistically high and low pixel values
-        bidx = (im < -1e7) | (im > 1e7)
-        im[bidx] = 0
-        msk[bidx] |= BadPixels.VALUE_OUT_OF_RANGE.value
+        if not self.dont_zero_orange:
+            bidx = (im < -1e7) | (im > 1e7)
+            im[bidx] = 0
+            msk[bidx] |= BadPixels.VALUE_OUT_OF_RANGE.value
 
         # include pixels with zero standard deviation in the dataset into
         # the mask
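
For reference, a minimal self-contained sketch of what the two new switches gate in this hunk: with both left at False (the default) non-finite and out-of-range pixel values are zeroed and flagged, while True keeps the raw values untouched. The arrays and the flag constants below are made up for illustration; the real code uses cal_tools.enums.BadPixels.

import numpy as np

VALUE_IS_NAN = 0b01        # stand-in flags, not the real BadPixels values
VALUE_OUT_OF_RANGE = 0b10

im = np.array([1.0, np.nan, 5e8, -3.0])
msk = np.zeros(im.shape, dtype=np.uint32)
dont_zero_nans = False
dont_zero_orange = False

if not dont_zero_nans:
    bidx = ~np.isfinite(im)
    im[bidx] = 0
    msk[bidx] |= VALUE_IS_NAN

if not dont_zero_orange:
    bidx = (im < -1e7) | (im > 1e7)
    im[bidx] = 0
    msk[bidx] |= VALUE_OUT_OF_RANGE

print(im)   # [ 1.  0.  0. -3.]
print(msk)  # [0 1 2 0]
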
@@ -1070,7 +1076,7 @@ class AgipdCorrections:
         """ Return the indices of valid data
         """
         agipd_base = self.idx_base
-        print(agipd_base)
+
         if self.index_v == 2:
             count = np.squeeze(self.infile[agipd_base + "image/count"])
             first = np.squeeze(self.infile[agipd_base + "image/first"])
@@ -1082,9 +1088,22 @@ class AgipdCorrections:
             lowok = (idxtrains > medianTrain - 1e4)
             highok = (idxtrains < medianTrain + 1e4)
             valid &= lowok & highok
+            
+            uq, fidxv, cntsv = np.unique(idxtrains,
+                                         return_index=True,
+                                         return_counts=True)
+            valid &= np.isin(idxtrains, uq[cntsv == 1])  # drop duplicated trains
             self.valid = valid
             last_index = int(first[valid][-1] + count[valid][-1])
             first_index = int(first[valid][0])
+            
+            # expand the (first, count) pairs of valid trains into image indices:
+            validc, validf = count[valid], first[valid]
+            valid_indices = np.concatenate([np.arange(validf[i],
+                                                      validf[i]+validc[i])
+                                            for i in range(validf.size)],
+                                            axis=0)
+            self.valid_indices = np.squeeze(valid_indices).astype(np.int32)
 
         elif self.index_v == 1:
             status = np.squeeze(self.infile[agipd_base + "image/status"])
@@ -1119,8 +1138,11 @@ class AgipdCorrections:
         last_index = self.last_index
         max_cells = self.max_cells
         agipd_base = self.agipd_base
-        allcells = self.infile[agipd_base + "image/cellId"]
-        allcells = np.squeeze(allcells[first_index:last_index, ...])
+        allcells = np.squeeze(self.infile[agipd_base + "image/cellId"])
+        if self.valid_indices is not None:
+            allcells = allcells[self.valid_indices]
+        else:
+            allcells = allcells[first_index:last_index]
 
         single_image = self.infile[agipd_base + "image/data"][first_index, ...]
         single_image = np.array(single_image)
@@ -1129,7 +1151,10 @@ class AgipdCorrections:
         if np.count_nonzero(can_calibrate) == 0:
             return
         allcells = allcells[can_calibrate]
-        firange = np.arange(first_index, last_index)
+        if self.valid_indices is not None:
+            firange = self.valid_indices
+        else:
+            firange = np.arange(first_index, last_index)
         firange = firange[can_calibrate]
 
         self.oshape = (firange.size if not self.il_mode else firange.size // 2,
@@ -1152,6 +1177,7 @@ class AgipdCorrections:
         if self.il_mode:
             firange = firange[0::2]
         alltrains = self.infile[agipd_base + "image/trainId"]
+        
         alltrains = np.squeeze(
             alltrains[first_index:last_index, ...])
 
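
A toy sketch (not part of the patch) of how the new valid-index handling in gen_valid_range expands per-train (first, count) pairs into a flat list of image indices while skipping duplicated trains. The arrays are made up, and the duplicate filter is written with np.isin so that the per-unique-train counts are mapped back onto every train entry:

import numpy as np

count = np.array([2, 2, 2, 2])              # images recorded per train
first = np.array([0, 2, 4, 6])              # first image index of each train
idxtrains = np.array([100, 101, 101, 102])  # train 101 appears twice in the index

valid = count != 0                          # trains without images are invalid
uq, _, cntsv = np.unique(idxtrains, return_index=True, return_counts=True)
valid &= np.isin(idxtrains, uq[cntsv == 1]) # drop trains recorded more than once

validc, validf = count[valid], first[valid]
valid_indices = np.concatenate([np.arange(validf[i], validf[i] + validc[i])
                                for i in range(validf.size)])
print(valid_indices)                        # -> [0 1 6 7]
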
diff --git a/cal_tools/cal_tools/tools.py b/cal_tools/cal_tools/tools.py
index 85f3e167973f21860a516fc2fc17207266604921..ab242cc61052cf904d5ca115694d5eb89e1b6ea8 100644
--- a/cal_tools/cal_tools/tools.py
+++ b/cal_tools/cal_tools/tools.py
@@ -297,9 +297,9 @@ def make_titlepage(sphinx_path, project, data_path, version):
         title_tmp = Template(file_.read())
 
     with open("{}/titlepage.tex.txt".format(sphinx_path), "w+") as mf:
-        mf.write(dedent(title_tmp.render(project=project,
-                                         data_path=data_path,
-                                         version=version)))
+        mf.write(dedent(title_tmp.render(project=tex_escape(project),
+                                         data_path=tex_escape(data_path),
+                                         version=tex_escape(version))))
 
 
 def finalize(joblist, finaljob, run_path, out_path, project, calibration,
@@ -461,7 +461,8 @@ already_printed = {}
 
 def get_from_db(device, constant, condition, empty_constant,
                 cal_db_interface, creation_time=None,
-                verbosity=1, timeout=30000, ntries=120, meta_only=True):
+                verbosity=1, timeout=30000, ntries=120, meta_only=True,
+                version_info=False):
     """
     Return calibration constants and metadata requested from CalDB
 
@@ -480,30 +481,44 @@ def get_from_db(device, constant, condition, empty_constant,
     """
     from iCalibrationDB import ConstantMetaData, Versions
     import zmq
-
+    
+    if version_info:
+        meta_only = False
+    
     if device:
         metadata = ConstantMetaData()
         metadata.calibration_constant = constant
         metadata.detector_condition = condition
+        when = None
         if creation_time is None:
-            metadata.calibration_constant_version = Versions.Now(device=device)
+            metadata.calibration_constant_version = Versions.Now(
+                                                    device=device)
         else:
+            if hasattr(creation_time, 'isoformat'):
+                when = creation_time.isoformat()
+
             metadata.calibration_constant_version = Versions.Timespan(
-                device=device,
-                start=creation_time)
+                                                    device=device,
+                                                    start=creation_time)
 
         while ntries > 0:
+            
             this_interface = get_random_db_interface(cal_db_interface)
             try:
-                metadata.retrieve(this_interface, timeout=timeout,
-                                  meta_only=meta_only)
+                r = metadata.retrieve(this_interface, timeout=timeout,
+                                      when=when, meta_only=meta_only,
+                                      version_info=version_info)
+                if version_info:
+                    return r
                 break
             except zmq.error.Again:
                 ntries -= 1
+                sleep(np.random.randint(30))
             except Exception as e:
                 if verbosity > 0:
                     print(e)
                 ntries = 0
+                break
 
         if ntries > 0:
             if verbosity > 0:
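
A hedged usage sketch of the new version_info switch on get_from_db: when set, meta_only is forced off and the raw version answer from the calibration database is returned instead of the constant itself. The wrapper name below is made up; device, constant and condition are whatever iCalibrationDB objects the caller already builds.

def query_constant_version(device, constant, condition, cal_db_interface,
                           creation_time=None):
    """Return only the version information recorded for a constant."""
    from cal_tools.tools import get_from_db
    return get_from_db(device, constant, condition,
                       empty_constant=None,
                       cal_db_interface=cal_db_interface,
                       creation_time=creation_time,
                       version_info=True)
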
diff --git a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
index 5d65241159bc13bd572877ad546739d966bc21b2..d0c1cb0def4043d699f2989732e25ba38fded598 100644
--- a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
+++ b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
@@ -18,25 +18,24 @@
     "ExecuteTime": {
      "end_time": "2019-02-21T11:30:06.730220Z",
      "start_time": "2019-02-21T11:30:06.658286Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/SPB/201901/p002480/raw/\" # the folder to read data from, required\n",
-    "run = 11 # runs to process, required\n",
-    "out_folder =  \"/gpfs/exfel/exp/SPB/201901/p002480/proc/\"  # the folder to output to, required\n",
-    "calfile =  \"/gpfs/exfel/data/scratch/haufs/agipd_on_demand/agipd_store.h5\" # path to calibration file. Leave empty if all data should come from DB\n",
+    "in_folder = \"/gpfs/exfel/exp/MID/201931/p900090/raw/\" # the folder to read data from, required\n",
+    "run = 5 # runs to process, required\n",
+    "out_folder =  \"/gpfs/exfel/exp/MID/201931/p900090/proc/\"  # the folder to output to, required\n",
+    "calfile =  \"/gpfs/exfel/data/scratch/haufs/agipd_on_demand/agipd_store_mid.h5\" # path to calibration file. Leave empty if all data should come from DB\n",
     "sequences =  [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
     "interlaced = False # whether data is in interlaced layout\n",
     "overwrite = True # set to True if existing data should be overwritten\n",
     "no_relative_gain = False # do not do relative gain correction\n",
-    "cluster_profile = \"noDB5\"\n",
+    "cluster_profile = \"noDB\"\n",
     "max_pulses = 500\n",
     "local_input = False\n",
     "bias_voltage = 300\n",
-    "cal_db_interface = \"tcp://max-exfl016:8020#8025\" # the database interface to use\n",
+    "cal_db_interface = \"tcp://max-exfl016:8015#8045\" # the database interface to use\n",
     "use_dir_creation_date = True # use the creation data of the input dir for database queries\n",
     "sequences_per_node = 2 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n",
     "photon_energy = 9.2 # photon energy in keV\n",
@@ -53,12 +52,14 @@
     "max_cells_db = 0 # set to a value different than 0 to use this value for DB queries\n",
     "chunk_size_idim = 1  # chunking size of imaging dimension, adjust if user software is sensitive to this.\n",
     "creation_date_offset = \"00:00:00\" # add an offset to creation date, e.g. to get different constants\n",
-    "instrument = \"SPB\"  # the instrument the detector is installed at, required\n",
+    "instrument = \"MID\"  # the instrument the detector is installed at, required\n",
     "force_hg_if_below = 1000 # set to a value other than 0 to force a pixel into high gain if it's high gain offset subtracted value is below this threshold\n",
     "force_mg_if_below = 1000 # set to a value other than 0 to force a pixel into medium gain if it's medium gain offset subtracted value is below this threshold\n",
     "mask_noisy_adc = 0.25 # set to a value other than 0 and below 1 to mask entire ADC if fraction of noisy pixels is above\n",
     "adjust_mg_baseline = False # adjust medium gain baseline to match highest high gain value\n",
     "acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine\n",
+    "dont_zero_nans = False # do not zero NaN values in corrected data\n",
+    "dont_zero_orange = False # do not zero very negative and very large values\n",
     "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node):\n",
     "    import glob\n",
@@ -89,8 +90,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-21T11:30:07.086286Z",
      "start_time": "2019-02-21T11:30:06.929722Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -181,8 +181,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-21T11:30:07.263445Z",
      "start_time": "2019-02-21T11:30:07.217070Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -241,8 +240,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-21T11:30:07.974174Z",
      "start_time": "2019-02-21T11:30:07.914832Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -308,8 +306,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-21T11:30:08.870802Z",
      "start_time": "2019-02-21T11:30:08.826285Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -342,8 +339,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-21T11:30:16.057429Z",
      "start_time": "2019-02-21T11:30:10.082114Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -353,7 +349,8 @@
     "                   bins_gain_vs_signal, bins_signal_low_range, bins_signal_high_range,\n",
     "                   bins_dig_gain_vs_signal, max_pulses, dbparms, fileparms, nodb, chunk_size_idim,\n",
     "                   special_opts, il_mode, loc, dinstance, force_hg_if_below, force_mg_if_below,\n",
-    "                   mask_noisy_adc, adjust_mg_baseline, acq_rate, inp):\n",
+    "                   mask_noisy_adc, adjust_mg_baseline, acq_rate, dont_zero_nans, dont_zero_orange,\n",
+    "                   inp):\n",
     "    print(\"foo\")\n",
     "    import numpy as np\n",
     "    import copy\n",
@@ -445,7 +442,8 @@
     "                                      h5_index_path=\"INDEX/{}/DET/{{}}CH0:xtdf/\".format(loc),\n",
     "                                      cal_det_instance=dinstance, force_hg_if_below=force_hg_if_below,\n",
     "                                      force_mg_if_below=force_mg_if_below, mask_noisy_adc=mask_noisy_adc,\n",
-    "                                      adjust_mg_baseline=adjust_mg_baseline, acquisition_rate=acq_rate)\n",
+    "                                      adjust_mg_baseline=adjust_mg_baseline, acquisition_rate=acq_rate,\n",
+    "                                      dont_zero_nans=dont_zero_nans, dont_zero_orange=dont_zero_orange)\n",
     "        \n",
     "        blc_noise, blc_noise_threshold, blc_hist, match_asics, corr_asic_diag, melt_snow = special_opts\n",
     "        agipd_corr.baseline_corr_using_noise = blc_noise\n",
@@ -466,7 +464,7 @@
     "        print(\"Initialized constants\")\n",
     "        \n",
     "        for irange in agipd_corr.get_iteration_range():\n",
-    "            \n",
+    "           \n",
     "            agipd_corr.correct_agipd(irange)\n",
     "            print(\"Iterated\")\n",
     "            \n",
@@ -541,7 +539,7 @@
     "                    sequences_qm, bins_gain_vs_signal, bins_signal_low_range, bins_signal_high_range,\n",
     "                    bins_dig_gain_vs_signal, max_pulses, dbparms, fileparms, nodb, chunk_size_idim,\n",
     "                    special_opts, il_mode, loc, dinstance, force_hg_if_below, force_mg_if_below,\n",
-    "                    mask_noisy_adc, adjust_mg_baseline, acq_rate)\n",
+    "                    mask_noisy_adc, adjust_mg_baseline, acq_rate, dont_zero_nans, dont_zero_orange)\n",
     "         \n",
     "        r = view.map_sync(p, inp)\n",
     "        \n",
@@ -571,9 +569,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "\n",
@@ -590,8 +586,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:28:51.765030Z",
      "start_time": "2019-02-18T17:28:51.714783Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -636,8 +631,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:28:52.857960Z",
      "start_time": "2019-02-18T17:28:51.767217Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -651,8 +645,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:28:53.690522Z",
      "start_time": "2019-02-18T17:28:52.860143Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -687,8 +680,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:28:54.370559Z",
      "start_time": "2019-02-18T17:28:53.691959Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -702,8 +694,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:31:51.668096Z",
      "start_time": "2019-02-18T17:31:51.529158Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -730,7 +721,6 @@
      "end_time": "2019-02-18T17:28:57.327702Z",
      "start_time": "2019-02-18T17:28:54.377061Z"
     },
-    "collapsed": false,
     "scrolled": false
    },
    "outputs": [],
@@ -748,8 +738,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:20.634480Z",
      "start_time": "2019-02-18T17:28:57.329231Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -802,8 +791,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:27.025667Z",
      "start_time": "2019-02-18T17:29:20.642029Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -835,8 +823,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:33.226396Z",
      "start_time": "2019-02-18T17:29:27.027758Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -866,8 +853,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:33.761015Z",
      "start_time": "2019-02-18T17:29:33.227922Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -887,8 +873,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:35.903487Z",
      "start_time": "2019-02-18T17:29:33.762568Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -913,8 +898,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:39.369686Z",
      "start_time": "2019-02-18T17:29:35.905152Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -934,8 +918,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:49.217848Z",
      "start_time": "2019-02-18T17:29:39.371232Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -952,8 +935,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:49.222484Z",
      "start_time": "2019-02-18T17:29:49.219933Z"
-    },
-    "collapsed": true
+    }
    },
    "outputs": [],
    "source": [
@@ -976,8 +958,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:49.641675Z",
      "start_time": "2019-02-18T17:29:49.224167Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -1006,8 +987,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:49.651913Z",
      "start_time": "2019-02-18T17:29:49.643556Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -1036,8 +1016,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:50.086169Z",
      "start_time": "2019-02-18T17:29:49.653391Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -1064,8 +1043,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:51.686562Z",
      "start_time": "2019-02-18T17:29:50.088883Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -1090,8 +1068,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:53.662423Z",
      "start_time": "2019-02-18T17:29:51.688376Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -1109,8 +1086,7 @@
     "ExecuteTime": {
      "end_time": "2019-02-18T17:29:55.483270Z",
      "start_time": "2019-02-18T17:29:53.664226Z"
-    },
-    "collapsed": false
+    }
    },
    "outputs": [],
    "source": [
@@ -1127,9 +1103,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": []
   }
@@ -1150,7 +1124,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.6"
+   "version": "3.6.7"
   }
  },
  "nbformat": 4,
diff --git a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
index f3e5930395aa741fb98611469932e1b119cc2118..297962e5a8a180ae1d7b866a1e7b862bedfd0690 100644
--- a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
@@ -14,9 +14,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "in_folder = \"/gpfs/exfel/exp/FXE/201802/p002271/ra\" # the folder to read data from, required\n",
@@ -42,6 +40,7 @@
     "gmapfile = \"/gpfs/exfel/data/scratch/xcal/jfgain/gainMaps_M233.h5\" #temporary gain calibration file, not in the DB; leave empty if using DB\n",
     "memcells = 1 # number of memory cells\n",
     "karabo_id = \"FXE_XAD_JF1M\" # karabo prefix of Jungfrau devices\n",
+    "karabo_id_control = \"\"  # if control is on a different ID, set to empty string for using the same as for data\n",
     "receiver_id = \"RECEIVER\" # inset for receiver devices\n",
     "control_id = \"CONTROL\" # inset for control devices\n",
     "db_module = \"Jungfrau_M233\" # ID of module in calibration database\n",
@@ -70,18 +69,14 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": []
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import warnings\n",
@@ -158,22 +153,23 @@
     "    creation_time = get_dir_creation_date(in_folder, run)\n",
     "    print(\"Using {} as creation time\".format(creation_time))\n",
     "    \n",
-    "cal_timeout = 600000 #ms"
+    "cal_timeout = 600000 #ms\n",
+    "\n",
+    "if karabo_id_control == \"\":\n",
+    "    karabo_id_control = karabo_id"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import h5py\n",
     "if not manual_slow_data:\n",
     "    with h5py.File(fp_path_contr.format(0), 'r') as f:\n",
-    "        integration_time = int(f['/RUN/{}/DET/{}/exposureTime/value'.format(karabo_id, control_id)][()]*1e6)\n",
-    "        bias_voltage = int(np.squeeze(f['/RUN/{}/DET/{}/vHighVoltage/value'.format(karabo_id, control_id)])[0])\n",
+    "        integration_time = int(f['/RUN/{}/DET/{}/exposureTime/value'.format(karabo_id_control, control_id)][()]*1e6)\n",
+    "        bias_voltage = int(np.squeeze(f['/RUN/{}/DET/{}/vHighVoltage/value'.format(karabo_id_control, control_id)])[0])\n",
     "print(\"Integration time is {} us\".format(integration_time))\n",
     "print(\"Bias voltage is {} V\".format(bias_voltage))"
    ]
@@ -181,9 +177,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "dirlist = sorted(os.listdir(ped_dir))\n",
@@ -214,9 +208,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import copy\n",
@@ -235,9 +227,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "def get_PSI_gainmaps(fname, dset):\n",
@@ -253,9 +243,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "## offset\n",
@@ -335,9 +323,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# gain correction\n",
@@ -350,9 +336,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "def copy_and_sanitize_non_cal_data(infile, outfile, h5base):\n",
@@ -381,9 +365,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import copy\n",
@@ -438,9 +420,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "def do_2d_plot(data, edges, y_axis, x_axis):\n",
@@ -459,9 +439,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "h, ex, ey = np.histogram2d(rim_data.flatten(), gim_data.flatten(),\n",
@@ -481,9 +459,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20,10))\n",
@@ -506,9 +482,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20,10))\n",
@@ -531,9 +505,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20,10))\n",
@@ -554,9 +526,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20,10))\n",
@@ -583,9 +553,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20,10))\n",
@@ -606,9 +574,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from cal_tools.enums import BadPixels\n",
@@ -632,9 +598,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20,10))\n",
@@ -646,9 +610,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": []
   }
@@ -669,7 +631,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.6"
+   "version": "3.6.7"
   }
  },
  "nbformat": 4,
diff --git a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_NBC.ipynb
index a79590ee15f8357b521f6e7af8ab83c11d2506d3..236d45880a20e11b312402e89df878729ea136b0 100644
--- a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_NBC.ipynb
@@ -14,9 +14,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "in_folder = '/gpfs/exfel/exp/SPB/201921/p002429/raw/'  # folder under which runs are located, required\n",
@@ -40,6 +38,7 @@
     "run_med = 0 # run number for G1 dark run, required\n",
     "run_low = 0 # run number for G2 dark run, required\n",
     "karabo_id = \"FXE_XAD_JF500K\"  # karabo prefix of Jungfrau devices\n",
+    "karabo_id_control = \"\"  # if control is on a different ID, set to empty string for using the same as for data\n",
     "receiver_id = \"RECEIVER\" # inset for receiver devices\n",
     "control_id = \"CONTROL\" # inset for control devices\n",
     "db_module = \"Jungfrau_M233\" # ID of module in calibration database\n",
@@ -50,9 +49,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import warnings\n",
@@ -92,15 +89,16 @@
     "    creation_time = get_dir_creation_date(in_folder, run_high)\n",
     "    print(\"Using {} as creation time\".format(creation_time))\n",
     "    \n",
-    "offset_abs_threshold = [offset_abs_threshold_low, offset_abs_threshold_high]"
+    "offset_abs_threshold = [offset_abs_threshold_low, offset_abs_threshold_high]\n",
+    "\n",
+    "if karabo_id_control == \"\":\n",
+    "    karabo_id_control = karabo_id"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "noiseCal = xcal.NoiseCalculator(sensorSize, nCells=memoryCells, cores=cpuCores, parallel=is_parallel, gains=gains,\n",
@@ -110,9 +108,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import h5py\n",
@@ -126,8 +122,8 @@
     "\n",
     "        if not manual_slow_data:\n",
     "            with h5py.File(fp_path.format(0), 'r') as f:\n",
-    "                integration_time = int(f['/RUN/{}/DET/{}/exposureTime/value'.format(karabo_id, control_id)][()]*1e6)\n",
-    "                bias_voltage = int(np.squeeze(f['/RUN/{}/DET/{}/vHighVoltage/value'.format(karabo_id, control_id)])[0])\n",
+    "                integration_time = int(f['/RUN/{}/DET/{}/exposureTime/value'.format(karabo_id_control, control_id)][()]*1e6)\n",
+    "                bias_voltage = int(np.squeeze(f['/RUN/{}/DET/{}/vHighVoltage/value'.format(karabo_id_control, control_id)])[0])\n",
     "        print(\"Integration time is {} us\".format(integration_time))\n",
     "        print(\"Bias voltage is {} V\".format(bias_voltage))\n",
     "\n",
@@ -176,7 +172,6 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
     "scrolled": false
    },
    "outputs": [],
@@ -257,9 +252,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "def print_bp_entry(bp):\n",
@@ -273,9 +266,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "bad_pixels_map = np.zeros(noise_map.shape, np.uint32)\n",
@@ -310,9 +301,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "## offset\n",
@@ -401,9 +390,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "\n",
@@ -441,9 +428,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "is_log = True\n",
@@ -481,7 +466,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.6"
+   "version": "3.6.7"
   }
  },
  "nbformat": 4,