diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index d6830b9098a93c003892f05b66c2cc39b67cb36c..03b9eafb7d1ac8151ee55f2849eb2ed7265b237c 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -5,3 +5,6 @@
 # - `git config blame.ignoreRevsFile .git-blame-ignore-revs`
 #
 # Second option is a bit better as it'll work on the whole repo all the time
+
+#  fix/pre-commit-whitespace - Whitespace fixes
+e7dfadaf4e189ef0e0f67798e8984695111257e3
diff --git a/cal_tools/cal_tools/agipdlib.py b/cal_tools/cal_tools/agipdlib.py
index 9b40845e024ba0655bb835fac0b1e31be02016d1..212e5c64eb6231f042baf157281fff6db76e220a 100644
--- a/cal_tools/cal_tools/agipdlib.py
+++ b/cal_tools/cal_tools/agipdlib.py
@@ -279,17 +279,17 @@ class AgipdCorrections:
             f = h5py.File(file_name, 'r')
             group = f[agipd_base]["image"]
 
-            (_, first_index, last_index, 
+            (_, first_index, last_index,
              _, valid_indices) = self.get_valid_image_idx(idx_base, f)
 
             allcells = np.squeeze(group['cellId'])
             allpulses = np.squeeze(group['pulseId'])
-         
+
             firange = self.gen_valid_range(first_index, last_index,
                                            self.max_cells, allcells,
                                            allpulses, valid_indices,
                                            apply_sel_pulses)
-                                           
+
             n_img = firange.shape[0]
             data_dict['nImg'][0] = n_img
             if np.all(np.diff(firange) == 1):
@@ -382,7 +382,7 @@ class AgipdCorrections:
         Both corrections are iterative and requires 4 iterations.
 
         Correction is performed in chunks of (e.g. 512 images).
-        A complete array of data from one file 
+        A complete array of data from one file
         (256 trains, 352 cells) will take
         256 * 352 * 128 * 512 * 4 // 1024**3 = 22 Gb in memory
 
@@ -484,7 +484,7 @@ class AgipdCorrections:
             self.shared_dict[i_proc]['t0_rgain'][first:last] = \
                 rawgain / t0[cellid, ...]
             self.shared_dict[i_proc]['raw_data'][first:last] = np.copy(data)
-    
+
         # Often most pixels are in high-gain, so it's more efficient to
         # set the whole output block to zero than select the right pixels.
         gain[:] = 0
@@ -514,7 +514,7 @@ class AgipdCorrections:
 
     def baseline_correction(self, i_proc:int, first:int, last:int):
         """
-        Perform image-wise base-line shift correction for 
+        Perform image-wise base-line shift correction for
         data in shared memory via histogram or stripe
 
         :param first: Index of the first image to be corrected
@@ -635,9 +635,9 @@ class AgipdCorrections:
         # not just set to 0
         if self.corr_bools.get('blc_set_min'):
             data[(data < 0) & (gain == 1)] = 0
-            
+
         # Do xray correction if requested
-        # The slopes we have in our constants are already relative 
+        # The slopes we have in our constants are already relative
         # slopeFF = slopeFFpix/avarege(slopeFFpix)
         # To apply them we have to / not *
         if self.corr_bools.get("xray_corr"):
@@ -746,7 +746,7 @@ class AgipdCorrections:
         :param i_proc: the index of sharedmem for a given file/module
         :return n_img: number of images to correct
         """
-        
+
         data_dict = self.shared_dict[i_proc]
         n_img = data_dict['nImg'][0]
 
@@ -814,12 +814,12 @@ class AgipdCorrections:
 
         return first_pulse, last_pulse, pulse_step
 
-    def choose_selected_pulses(self, allpulses: np.array, 
+    def choose_selected_pulses(self, allpulses: np.array,
                                can_calibrate: np.array) -> np.array:
 
         """
         Choose given selected pulse from pulseId array of
-        raw data. The selected pulses range is validated then 
+        raw data. The selected pulses range is validated then
         used to add a booleans in can_calibrate and guide the
         later appliance.
 
@@ -830,7 +830,7 @@ class AgipdCorrections:
                                selected pulses
         """
 
-        (first_pulse, last_pulse, 
+        (first_pulse, last_pulse,
         pulse_step) = self.validate_selected_pulses(allpulses)
 
         # collect the pulses to be calibrated
@@ -853,9 +853,9 @@ class AgipdCorrections:
         return can_calibrate
 
     def gen_valid_range(self, first_index: int, last_index: int,
-                        max_cells: int, allcells: np.array, allpulses: np.array, 
+                        max_cells: int, allcells: np.array, allpulses: np.array,
                         valid_indices: Optional[np.array] = None,
-                        apply_sel_pulses: Optional[bool] = True 
+                        apply_sel_pulses: Optional[bool] = True
                         ) -> np.array:
         """ Validate the arrays of image.cellId and image.pulseId
         to check presence of data and to avoid empty trains.
@@ -871,7 +871,7 @@ class AgipdCorrections:
         :param valid_indices: validated indices of image.data
         :param apply_sel_pulses: A flag for applying selected pulses
                                  after validation for correction
-        :return firange: An array of validated image.data 
+        :return firange: An array of validated image.data
                          indices to correct
         # TODO: Ignore rows (32 pulse) of empty pulses even if
         common-mode is selected
@@ -1067,7 +1067,7 @@ class AgipdCorrections:
             rel_low gain = _rel_medium gain * 4.48
 
         :param cons_data: A dictionary for each retrieved constant value.
-        :param when: A dictionary for the creation time 
+        :param when: A dictionary for the creation time
                      of each retrieved constant.
         :param module_idx: A module_idx index
         :return:
@@ -1089,7 +1089,7 @@ class AgipdCorrections:
                 bpixels |= cons_data["BadPixelsFF"].astype(np.uint32)[...,
                                                                       :bpixels.shape[2],  # noqa
                                                                       None]
-            
+
             if when["SlopesFF"]: # Checking if constant was retrieved
 
                 slopesFF = cons_data["SlopesFF"]
@@ -1150,16 +1150,16 @@ class AgipdCorrections:
                 pc_med_m = slopesPC[..., :self.max_cells, 3]
                 pc_med_l = slopesPC[..., :self.max_cells, 4]
 
-                # calculate median for slopes  
+                # calculate median for slopes
                 pc_high_med = np.nanmedian(pc_high_m, axis=(0,1))
                 pc_med_med = np.nanmedian(pc_med_m, axis=(0,1))
                 # calculate median for intercepts:
                 pc_high_l_med = np.nanmedian(pc_high_l, axis=(0,1))
                 pc_med_l_med = np.nanmedian(pc_med_l, axis=(0,1))
-                
-                # sanitize PC data 
+
+                # sanitize PC data
                 # (it should be done already on the level of constants)
-                # In the following loop, 
+                # In the following loop,
                 # replace `nan`s across memory cells with
                 # the median value calculated previously.
                 # Then, values outside of the valid range (0.8 and 1.2)
diff --git a/cal_tools/cal_tools/agipdutils.py b/cal_tools/cal_tools/agipdutils.py
index 9533a65717676bef660795007a090214121847a5..347198785c517a8c4e6621466fa374de351ec3b6 100644
--- a/cal_tools/cal_tools/agipdutils.py
+++ b/cal_tools/cal_tools/agipdutils.py
@@ -175,11 +175,11 @@ def baseline_correct_via_noise(d, noise, g, threshold):
     the shift corrected data is returned.
 
     """
-    
+
     seln = (g == 0) & (d <= 50)
     h, e = np.histogram(d[seln], bins=210, range=(-2000, 100))
     c = (e[1:] + e[:-1]) / 2
-    
+
     try:
         cwtmatr = cwt(h, ricker, [noise, 3. * noise, 5. * noise])
     except:
diff --git a/cal_tools/cal_tools/enums.py b/cal_tools/cal_tools/enums.py
index a516dee2f55dbd8ece632c00f523d58a80b2f578..19420caeacf38580e808ee0da4a76c508e29e464 100644
--- a/cal_tools/cal_tools/enums.py
+++ b/cal_tools/cal_tools/enums.py
@@ -4,7 +4,7 @@ from enum import Enum
 class BadPixels(Enum):
     """ The European XFEL Bad Pixel Encoding
     """
-        
+
     OFFSET_OUT_OF_THRESHOLD  = 0b000000000000000000001 # bit 1
     NOISE_OUT_OF_THRESHOLD   = 0b000000000000000000010 # bit 2
     OFFSET_NOISE_EVAL_ERROR  = 0b000000000000000000100 # bit 3
@@ -26,12 +26,12 @@ class BadPixels(Enum):
     OVERSCAN                 = 0b001000000000000000000 # bit 19
     NON_SENSITIVE            = 0b010000000000000000000 # bit 20
     NON_LIN_RESPONSE_REGION  = 0b100000000000000000000 # bit 21
-    
-    
+
+
 class BadPixelsFF(Enum):
     """ The SLopesFF Bad Pixel Encoding
     """
-        
+
     FIT_FAILED               = 0b000000000000000000001 # bit 1
     CHI2_THRESHOLD           = 0b000000000000000000010 # bit 2
     NOISE_PEAK_THRESHOLD     = 0b000000000000000000100 # bit 3
@@ -41,11 +41,10 @@ class BadPixelsFF(Enum):
     BAD_DARK                 = 0b000000000000001000000 # bit 6
     NO_ENTRY                 = 0b000000000000010000000 # bit 7
     GAIN_DEVIATION           = 0b000000000000100000000 # bit 8
-    
-    
+
+
 class SnowResolution(Enum):
     """ An Enum specifying how to resolve snowy pixels
     """
     NONE = "none"
     INTERPOLATE = "interpolate"
-
diff --git a/cal_tools/cal_tools/lpdlib.py b/cal_tools/cal_tools/lpdlib.py
index ea78384d4736f866cbed7043aa78669659d6d843..716637a5261cbfddeb752758290644c0c1707c0c 100644
--- a/cal_tools/cal_tools/lpdlib.py
+++ b/cal_tools/cal_tools/lpdlib.py
@@ -68,7 +68,7 @@ class LpdCorrections:
             index section
         :param do_ff: perform flat field corrections
         :param correct_non_linear: perform non-linear transition region corr.
-        :param karabo_data_mode: set to true to use data iterated with karabo 
+        :param karabo_data_mode: set to true to use data iterated with karabo
             data
         """
         self.lpd_base = h5_data_path.format(channel)
@@ -261,19 +261,19 @@ class LpdCorrections:
 
         # correct offset
         im -= og
-        
+
         nlf = 0
         if self.mark_nonlin and self.linear_between is not None:
             for gl, lr in enumerate(self.linear_between):
-                
+
                 midx = (gain == gl) & ((im < lr[0]) | (im > lr[1]))
                 msk[midx] = BadPixels.NON_LIN_RESPONSE_REGION.value
                 numnonlin = np.count_nonzero(midx, axis=(1,2))
                 nlf += numnonlin
             nlf = nlf/float(im.shape[0] * im.shape[1])
-            
+
         # hacky way of smoothening transition region between med and low
-        
+
         cfac = 1
         if self.nlc_version == 1 and self.cnl:
             cfac = 0.314 * np.exp(-im * 0.001)
@@ -310,7 +310,7 @@ class LpdCorrections:
             cf = lin_exp_fun(x, cnl['m'], cnl['b'], cnl['A'], cnl['lam'],
                              cnl['c'])
             im[(gain == 2)] -= np.minimum(cf, 0.45) * x
-        
+
         # create bad pixels masks, here non-finite values
         bidx = ~np.isfinite(im)
         im[bidx] = 0
@@ -547,7 +547,7 @@ class LpdCorrections:
                                     dtype=np.uint16, fletcher32=True)
         self.outfile.create_dataset(lpdbase + "image/length", fsz,
                                     dtype=np.uint32, fletcher32=True)
-        
+
         if self.mark_nonlin:
             self.outfile.create_dataset(lpdbase + "image/nonLinear", fsz,
                                         dtype=np.float32, fletcher32=True)
@@ -590,9 +590,9 @@ class LpdCorrections:
               connect to
             * tcp://host:port_low#port_high to specify a port range from
               which a random port will be picked. E.g. specifying
-              
+
               tcp://max-exfl016:8015#8025
-              
+
               will randomly pick an address in the range max-exfl016:8015 and
               max-exfl016:8025.
 
diff --git a/cal_tools/cal_tools/metrology.py b/cal_tools/cal_tools/metrology.py
index 6c7e807d9051b4078412ccf5d767f24e1cd5d14e..6e94ad85c086a7506e88a4e6c8146b52a22c2092 100644
--- a/cal_tools/cal_tools/metrology.py
+++ b/cal_tools/cal_tools/metrology.py
@@ -7,28 +7,28 @@ from matplotlib import pylab as plt
 
 
 def getModulePosition(metrologyFile, moduleId):
-    """Position (in mm) of a module relative to the top left 
+    """Position (in mm) of a module relative to the top left
     corner of its quadrant. In case of tile-level positions,
-    the the position refers to the center of the top left 
+    the the position refers to the center of the top left
     pixel.
-    
+
     Args
     ----
-    
+
     metrologyFile : str
         Fully qualified path and filename of the metrology file
     moduleId : str
         Identifier of the module in question (e.g. 'Q1M2T03')
-        
+
     Returns
     -------
-    
-    ndarray: 
+
+    ndarray:
         (x, y)-Position of the module in its quadrant
-    
+
     Raises
     ------
-    
+
     ValueError: In case the moduleId contains invalid module
         identifieres
     """
@@ -38,11 +38,11 @@ def getModulePosition(metrologyFile, moduleId):
     #
     #   QXMYTZZ
     #
-    # where X, Y, and Z are digits. Q denotes the quadrant 
-    # (X = 1, ..., 4), M the supermodule (Y = 1, ..., 4) and T 
+    # where X, Y, and Z are digits. Q denotes the quadrant
+    # (X = 1, ..., 4), M the supermodule (Y = 1, ..., 4) and T
     # the tile (Z = 1, ..., 16; with leading zeros).
     modulePattern = re.compile(r'[QMT]\d+')
-    # Give the module identifier Q1M1T01, the moduleList splits this 
+    # Give the module identifier Q1M1T01, the moduleList splits this
     # into the associated quadrant, supermodule, and tile identifiers:
     # >>> print(moduleList)
     # ['Q1', 'M1', 'T01']
@@ -53,7 +53,7 @@ def getModulePosition(metrologyFile, moduleId):
     # >>> print(h5Keys)
     # ['Q1', 'Q1/M1', 'Q1/M1/T01']
     h5Keys = ['/'.join(moduleList[:idx+1]) for idx in range(len(moduleList))]
-    
+
     # Every module of the detector gives its position relative to
     # the top left corner of its parent structure. Every position
     # is stored in the positions array
@@ -83,17 +83,17 @@ def getModulePosition(metrologyFile, moduleId):
 def translateToModuleBL(tilePositions):
     """Tile coordinates within a supermodule with the
     origin in the bottom left corner.
-    
+
     Parameters
     ----------
-    
+
     tilePositions : ndarray
-        Tile positions as retrieved from the LPD metrology 
+        Tile positions as retrieved from the LPD metrology
         file. Must have shape (16, 2)
-        
+
     Returns
     -------
-    
+
     ndarray
         Tile positions relative to the bottom left corner.
     """
@@ -115,7 +115,7 @@ def translateToModuleBL(tilePositions):
     # In the clockwise order of LPD tiles, the 8th
     # tile in the list is the bottom left tile
     bottomLeft8th = np.asarray([0., moduleCoords[8][1]])
-    # Translate coordinates to the bottom left corner 
+    # Translate coordinates to the bottom left corner
     # of the bottom left tile
     bottomLeft = moduleCoords - bottomLeft8th
     return bottomLeft
@@ -124,44 +124,44 @@ def translateToModuleBL(tilePositions):
 def plotSupermoduleData(tileData, metrologyPositions, zoom=1., vmin=100., vmax=6000.):
     """Plots data of a supermodule with tile positions
     determined by the metrology data.
-    
+
     Parameters
     ----------
-    
+
     tileData : ndarray
-        Supermodule image data separated in individual tiles. 
+        Supermodule image data separated in individual tiles.
         Must have shape (16, 32, 128).
-        
-    metrologyPositions : ndarray 
-        Tile positions as retrieved from the metrology file. 
+
+    metrologyPositions : ndarray
+        Tile positions as retrieved from the metrology file.
         Must have shape (16, 2)
-        
+
     zoom : float, optional
         Can enlarge or decrease the size of the plot. Default = 1.
-        
+
     vmin, vmax : float, optional
         Value range. Default vmin=100., vmax=6000.
-        
+
     Returns
     -------
     matplotlib.Figure
-        Figure object containing the supermodule image        
+        Figure object containing the supermodule image
     """
     # Data needs to have 16 tiles, each with
     # 32x128 pixels
     assert tileData.shape == (16, 32, 128)
-    
+
     # Conversion coefficient, required since
     # matplotlib does its business in inches
     mmToInch = 1./25.4 # inch/mm
-    
+
     # Some constants
     numberOfTiles = 16
     numberOfRows = 8
     numberOfCols = 2
     tileWidth = 65.7 # in mm
     tileHeight = 17.7 # in mm
-    
+
     # Base width and height are given by spatial
     # extend of the modules. The constants 3.4 and 1
     # are estimated as a best guess for gaps between
@@ -169,26 +169,26 @@ def plotSupermoduleData(tileData, metrologyPositions, zoom=1., vmin=100., vmax=6
     figureWidth = zoom * numberOfCols*(tileWidth + 3.4)*mmToInch
     figureHeight = zoom * numberOfRows*(tileHeight + 1.)*mmToInch
     fig = plt.figure(figsize=(figureWidth, figureHeight))
-    
-    # The metrology file references module positions 
+
+    # The metrology file references module positions
     bottomRightCornerCoordinates = translateToModuleBL(metrologyPositions)
-    
+
     # The offset here accounts for the fact that there
     # might be negative x,y values
     offset = np.asarray(
-        [min(bottomRightCornerCoordinates[:, 0]), 
+        [min(bottomRightCornerCoordinates[:, 0]),
          min(bottomRightCornerCoordinates[:, 1])]
     )
-    
+
     # Account for blank borders in the plot
     borderLeft = 0.5 * mmToInch
     borderBottom = 0.5 * mmToInch
-    
+
     # The height and width of the plot remain
     # constant for a given supermodule
     width = zoom * 65.7 * mmToInch / (figureWidth - 2.*borderLeft)
     height = zoom * 17.7 * mmToInch / (figureHeight - 2.*borderBottom)
-    
+
     for i in range(numberOfTiles):
         # This is the top left corner of the tile with
         # respect to the top left corner of the supermodule
@@ -200,38 +200,38 @@ def plotSupermoduleData(tileData, metrologyPositions, zoom=1., vmin=100., vmax=6
         ax = fig.add_axes((ax0, ay0, width, height), frameon=False)
         # Do not display axes, tick markers or labels
         ax.tick_params(
-            axis='both', left='off', top='off', right='off', bottom='off', 
+            axis='both', left='off', top='off', right='off', bottom='off',
             labelleft='off', labeltop='off', labelright='off', labelbottom='off'
         )
         # Plot the image. If one wanted to have a colorbar
         # the img object would be needed to produce one
         img = ax.imshow(
-            tileData[i], 
-            interpolation='nearest', 
+            tileData[i],
+            interpolation='nearest',
             vmin=vmin, vmax=vmax
         )
-        
+
     return fig
 
 
 def splitChannelDataIntoTiles(channelData, clockwiseOrder=False):
     """Splits the raw channel data into indiviual tiles
-    
+
     Args
     ----
-    
+
     channelData : ndarray
         Raw channel data. Must have shape (256, 256)
-        
+
     clockwiseOrder : bool, optional
         If set to True, the sequence of tiles is given
         in the clockwise order starting with the top
         right tile (LPD standard). If set to false, tile
         data is returned in reading order
-        
+
     Returns
     -------
-    
+
     ndarray
         Same data, but reshaped into (12, 32, 128)
     """
@@ -240,8 +240,8 @@ def splitChannelDataIntoTiles(channelData, clockwiseOrder=False):
     orderedTiles = tiles.reshape(16, 32, 128)
     if clockwiseOrder:
         # Naturally, the tile data after splitting is in reading
-        # order (i.e. top left tile is first, top right tile is second, 
-        # etc.). The official LPD tile order however is clockwise, 
+        # order (i.e. top left tile is first, top right tile is second,
+        # etc.). The official LPD tile order however is clockwise,
         # starting with the top right tile. The following array
         # contains indices of tiles in reading order as they would
         # be iterated in clockwise order (starting from the top right)
@@ -253,22 +253,22 @@ def splitChannelDataIntoTiles(channelData, clockwiseOrder=False):
 
 def splitChannelDataIntoTiles2(channelData, clockwiseOrder=False):
     """Splits the raw channel data into indiviual tiles
-    
+
     Args
     ----
-    
+
     channelData : ndarray
         Raw channel data. Must have shape (256, 256)
-        
+
     clockwiseOrder : bool, optional
         If set to True, the sequence of tiles is given
         in the clockwise order starting with the top
         right tile (LPD standard). If set to false, tile
         data is returned in reading order
-        
+
     Returns
     -------
-    
+
     ndarray
         Same data, but reshaped into (12, 32, 128)
     """
@@ -277,8 +277,8 @@ def splitChannelDataIntoTiles2(channelData, clockwiseOrder=False):
     orderedTiles = np.moveaxis(tiles.reshape(16, 128, 32, channelData.shape[2]), 2, 1)
     if clockwiseOrder:
         # Naturally, the tile data after splitting is in reading
-        # order (i.e. top left tile is first, top right tile is second, 
-        # etc.). The official LPD tile order however is clockwise, 
+        # order (i.e. top left tile is first, top right tile is second,
+        # etc.). The official LPD tile order however is clockwise,
         # starting with the top right tile. The following array
         # contains indices of tiles in reading order as they would
         # be iterated in clockwise order (starting from the top right)
@@ -294,7 +294,7 @@ def returnPositioned2(geometry_file, modules, dquads):
     tile_order = [1, 2, 3, 4]
     cells = 0
     for sm, mn in modules:
-        
+
         position = np.asarray([getModulePosition(geometry_file,
                                                  'Q{}/M{:d}/T{:02d}'.format(
                                                      sm//4+1,
@@ -355,7 +355,7 @@ def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all',
     all_intersected = None
     for file in files:
         ch = int(re.findall(r'.*-{}([0-9]+)-.*'.format(detector), file)[0])
-        
+
         try:
             with h5py.File(file, 'r') as f:
                 if trainIds is None:
@@ -369,18 +369,18 @@ def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all',
                     counts = np.squeeze(f[cpath])
                     nzeros = counts != 0
                     tid = tid[nzeros]
-                    
+
                     intersection = np.intersect1d(tid, trainIds, assume_unique=True)
-                    
+
                     if intersection.size == 0:
                         continue
-                        
+
                     if all_intersected is None:
                         all_intersected = intersection
                     else:
                         all_intersected = np.intersect1d(all_intersected, intersection, assume_unique=True)
                     continue
-                    
+
                 if ch not in data:
                     data[ch] = np.moveaxis(np.moveaxis(d, 0, 2), 1, 0)
                 else:
@@ -388,7 +388,7 @@ def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all',
         except Exception as e:
             print(file)
             print(e)
-            
+
     pcounts = None
     if trainIds is not None:
         for file in files:
@@ -396,7 +396,7 @@ def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all',
 
             try:
                 with h5py.File(file, 'r') as f:
-                    
+
 
                     tid = np.squeeze(f["/INDEX/trainId"])
                     spath = datapath.replace("INSTRUMENT", "INDEX").format(ch).split("/")[:-1]
@@ -408,26 +408,26 @@ def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all',
                     tid = tid[nzeros]
 
                     tid_to_use = np.in1d(tid, all_intersected)
-                    
-                    indices = []                                        
+
+                    indices = []
                     firsts = f[fpath][nzeros][tid_to_use]
                     counts = f[cpath][nzeros][tid_to_use]
-                    
+
                     if pcounts is None:
                         pcounts = counts
                     df = firsts[1]-firsts[0]
-                    
-                    for i in range(firsts.shape[0]):                        
+
+                    for i in range(firsts.shape[0]):
                         count = counts[i] if max_counts is None else max_counts
                         first = firsts[i]//df*count if not nwa else firsts[i]
-                        
+
                         indices += list(np.arange(first, first+count))
-                    
+
                     if len(indices) == 0:
-                        continue                        
+                        continue
                     indices = np.unique(np.sort(np.array(indices).astype(np.int)))
                     indices = indices[indices < f[datapath.format(ch)].shape[0]]
-                    
+
                     #if all contingous just use the range
                     #if np.allclose(indices[1:]-indices[:-1], 1):
                     d = np.squeeze(f[datapath.format(ch)][indices,:,:])
@@ -438,11 +438,11 @@ def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all',
                     else:
                         data[ch] = np.concatenate(data[ch], np.moveaxis(np.moveaxis(d, 0, 2), 1, 0), axis=2)
             except Exception as e:
-                print(e)                       
-            
+                print(e)
+
     full_data = []
     dummy = next(iter(data.values()))
-    
+
     for i in range(16):
         if i in data:
             full_data.append((i, data[i]))
@@ -453,7 +453,7 @@ def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all',
         return np.moveaxis(pos, 2, 0)
     else:
         return np.moveaxis(pos, 2, 0), all_intersected, pcounts
-    
+
 def matchedFileList(filelist, datapath, nImages='all', trainIds=None, nwa=False, max_counts=None):
     import glob
     detector = "LPD" if "LPD" in datapath else "AGIPD"
@@ -462,7 +462,7 @@ def matchedFileList(filelist, datapath, nImages='all', trainIds=None, nwa=False,
     all_intersected = None
     for file in files:
         ch = int(re.findall(r'.*-{}([0-9]+)-.*'.format(detector), file)[0])
-        
+
         try:
             with h5py.File(file, 'r') as f:
                 if trainIds is None:
@@ -476,18 +476,18 @@ def matchedFileList(filelist, datapath, nImages='all', trainIds=None, nwa=False,
                     counts = np.squeeze(f[cpath])
                     nzeros = counts != 0
                     tid = tid[nzeros]
-                    
+
                     intersection = np.intersect1d(tid, trainIds, assume_unique=True)
-                    
+
                     if intersection.size == 0:
                         continue
-                        
+
                     if all_intersected is None:
                         all_intersected = intersection
                     else:
                         all_intersected = np.intersect1d(all_intersected, intersection, assume_unique=True)
                     continue
-                    
+
                 if ch not in data:
                     data[ch] = np.moveaxis(np.moveaxis(d, 0, 2), 1, 0)
                 else:
@@ -495,7 +495,7 @@ def matchedFileList(filelist, datapath, nImages='all', trainIds=None, nwa=False,
         except Exception as e:
             print(file)
             print(e)
-            
+
     pcounts = None
     if trainIds is not None:
         for file in files:
@@ -503,7 +503,7 @@ def matchedFileList(filelist, datapath, nImages='all', trainIds=None, nwa=False,
 
             try:
                 with h5py.File(file, 'r') as f:
-                    
+
 
                     tid = np.squeeze(f["/INDEX/trainId"])
                     spath = datapath.replace("INSTRUMENT", "INDEX").format(ch).split("/")[:-1]
@@ -515,26 +515,26 @@ def matchedFileList(filelist, datapath, nImages='all', trainIds=None, nwa=False,
                     tid = tid[nzeros]
 
                     tid_to_use = np.in1d(tid, all_intersected)
-                    
-                    indices = []                                        
+
+                    indices = []
                     firsts = f[fpath][nzeros][tid_to_use]
                     counts = f[cpath][nzeros][tid_to_use]
-                    
+
                     if pcounts is None:
                         pcounts = counts
                     df = firsts[1]-firsts[0]
-                    
-                    for i in range(firsts.shape[0]):                        
+
+                    for i in range(firsts.shape[0]):
                         count = counts[i] if max_counts is None else max_counts
                         first = firsts[i]//df*count if not nwa else firsts[i]
-                        
+
                         indices += list(np.arange(first, first+count))
-                    
+
                     if len(indices) == 0:
-                        continue                        
+                        continue
                     indices = np.unique(np.sort(np.array(indices).astype(np.int)))
                     indices = indices[indices < f[datapath.format(ch)].shape[0]]
-                    
+
                     #if all contingous just use the range
                     #if np.allclose(indices[1:]-indices[:-1], 1):
                     d = np.squeeze(f[datapath.format(ch)][indices,:,:])
@@ -545,11 +545,11 @@ def matchedFileList(filelist, datapath, nImages='all', trainIds=None, nwa=False,
                     else:
                         data[ch] = np.concatenate(data[ch], np.moveaxis(np.moveaxis(d, 0, 2), 1, 0), axis=2)
             except Exception as e:
-                print(e)                       
-            
+                print(e)
+
     full_data = []
     dummy = next(iter(data.values()))
-    
+
     for i in range(16):
         if i in data:
             full_data.append((i, data[i]))
@@ -559,4 +559,4 @@ def matchedFileList(filelist, datapath, nImages='all', trainIds=None, nwa=False,
     if trainIds is None:
         return pos
     else:
-        return pos, all_intersected, pcounts
\ No newline at end of file
+        return pos, all_intersected, pcounts
diff --git a/cal_tools/cal_tools/plotting.py b/cal_tools/cal_tools/plotting.py
index 7eff8f3ed4be174a5c384ba791ffb6a33a14c7e6..d85cbd1716a887edcd37f7058915f25538d87f70 100644
--- a/cal_tools/cal_tools/plotting.py
+++ b/cal_tools/cal_tools/plotting.py
@@ -47,7 +47,7 @@ def show_overview(d, cell_to_preview, gain_to_preview, out_folder=None, infix=No
             else:
                 med = np.nanmedian(item[..., cell_to_preview])
             medscale = med
-            if med == 0:           
+            if med == 0:
                 medscale = 0.1
 
             bound = 0.2
diff --git a/cal_tools/cal_tools/pnccdlib.py b/cal_tools/cal_tools/pnccdlib.py
index 3d913a72a49dd6e436f924a648d8033415051784..ec8b92e7f65dfb21a09fa099df3a749d55833fc7 100644
--- a/cal_tools/cal_tools/pnccdlib.py
+++ b/cal_tools/cal_tools/pnccdlib.py
@@ -33,10 +33,10 @@ def extract_slow_data(karabo_id: str, karabo_da_control: str,
                 bias_voltage = abs(f[os.path.join(mdl_ctrl_path,
                                                   "DAQ_MPOD/u0voltage/value")][0])  # noqa
             if gain == 0.1:
-                gain = f[os.path.join(mdl_ctrl_path, 
+                gain = f[os.path.join(mdl_ctrl_path,
                                       "DAQ_GAIN/pNCCDGain/value")][0]
             if fix_temperature_top == 0.:
-                fix_temperature_top = f[os.path.join(ctrl_path, 
+                fix_temperature_top = f[os.path.join(ctrl_path,
                                                      "inputA/krdg/value")][0]
             if fix_temperature_bot == 0.:
                 fix_temperature_bot = f[os.path.join(ctrl_path,
@@ -53,5 +53,5 @@ def extract_slow_data(karabo_id: str, karabo_da_control: str,
               os.path.join(ctrl_path, "inputA/krdg/value"))
         print("fix_temperature_bot control h5path:",
               os.path.join(ctrl_path, "inputB/krdg/value"))
-    
-    return bias_voltage, gain, fix_temperature_top, fix_temperature_bot
\ No newline at end of file
+
+    return bias_voltage, gain, fix_temperature_top, fix_temperature_bot
diff --git a/cal_tools/cython/agipdalgs.pyx b/cal_tools/cython/agipdalgs.pyx
index e5457aef11a29a81ced4538e00365cf99096c88f..cacc0f549bbce04b4d25b74874488b0ce3cf6f3e 100644
--- a/cal_tools/cython/agipdalgs.pyx
+++ b/cal_tools/cython/agipdalgs.pyx
@@ -15,10 +15,10 @@ def histogram(cnp.ndarray[cnp.float32_t, ndim=2] data, range=(0,1), int bins=20,
     """
 
     cdef cnp.ndarray[cnp.float32_t, ndim=2] ret
-    cdef double min, max 
-    min = range[0] 
-    max = range[1] 
-         
+    cdef double min, max
+    min = range[0]
+    max = range[1]
+
     ret = np.zeros((bins,data.shape[1]), dtype=np.float32)
     cdef double bin_width = (max - min) / bins
     cdef double x
@@ -31,9 +31,9 @@ def histogram(cnp.ndarray[cnp.float32_t, ndim=2] data, range=(0,1), int bins=20,
         for i in xrange(data.shape[0]):
             x = (data[i,j] - min) / bin_width
             if 0.0 <= x < bins:
-                if weights is None: 
+                if weights is None:
                     ret[<int>x,j] += 1.0
-                else: 
+                else:
                     ret[<int>x,j] += weights[i,j]
     return ret, np.linspace(min, max, bins+1)
 
@@ -83,16 +83,16 @@ def gain_choose(cnp.ndarray[cnp.uint8_t, ndim=3] a, cnp.ndarray[cnp.float32_t, n
     cdef cnp.uint8_t v
     cdef cnp.ndarray[cnp.float32_t, ndim=3] out
     out = np.zeros_like(a, dtype=np.float32)
-    
+
     assert (<object>choices).shape == (3,) + (<object>a).shape
-    
+
     with nogil:
         for i in range(a.shape[0]):
             for j in range(a.shape[1]):
                 for k in range(a.shape[2]):
                     v = a[i, j, k]
                     out[i, j, k] = choices[v, i, j, k]
-    
+
     return out
 
 
@@ -104,16 +104,16 @@ def gain_choose_int(cnp.ndarray[cnp.uint8_t, ndim=3] a, cnp.ndarray[cnp.int32_t,
     cdef cnp.uint8_t v
     cdef cnp.ndarray[cnp.int32_t, ndim=3] out
     out = np.zeros_like(a, dtype=np.int32)
-    
+
     assert (<object>choices).shape == (3,) + (<object>a).shape
-    
+
     with nogil:
         for i in range(a.shape[0]):
             for j in range(a.shape[1]):
                 for k in range(a.shape[2]):
                     v = a[i, j, k]
                     out[i, j, k] = choices[v, i, j, k]
-    
+
     return out
 
 
@@ -130,12 +130,12 @@ def sum_and_count_in_range_asic(cnp.ndarray[float, ndim=4] arr, float lower, flo
     cdef float value
     cdef cnp.ndarray[unsigned long long, ndim=2] count
     cdef cnp.ndarray[double, ndim=2] sum_
-    
+
     # Drop axes -2 & -1 (pixel dimensions within each ASIC)
     out_shape = arr[:, :, 0, 0].shape
     count = np.zeros(out_shape, dtype=np.uint64)
     sum_ = np.zeros(out_shape, dtype=np.float64)
-    
+
     with nogil:
         for i in range(arr.shape[0]):
             for k in range(arr.shape[1]):
@@ -161,13 +161,13 @@ def sum_and_count_in_range_cell(cnp.ndarray[float, ndim=4] arr, float lower, flo
     cdef float value
     cdef cnp.ndarray[unsigned long long, ndim=2] count
     cdef cnp.ndarray[double, ndim=2] sum_
-    
+
     # Drop axes 0 & 1
     out_shape = arr[0, 0, :, :].shape
     count = np.zeros(out_shape, dtype=np.uint64)
     sum_ = np.zeros(out_shape, dtype=np.float64)
-    
-    
+
+
     with nogil:
         for i in range(arr.shape[0]):
             for k in range(arr.shape[1]):
diff --git a/docs/source/advanced.rst b/docs/source/advanced.rst
index 9f623ab8cc46ef589babef4d044c72341c43dc4c..07bfb1f9e846a36ee2271f4528a2b457381a0969 100644
--- a/docs/source/advanced.rst
+++ b/docs/source/advanced.rst
@@ -42,35 +42,35 @@ This can be useful to add user requests while running. For this:
 
 1. create a working copy of the notebook in question, and create a commit of the the
    production notebook to fall back to in case of problems::
-   
+
    git add production_notebook_NBC.py
    git commit -m "Known working version before edits"
    cp production_notebook_NBC.py production_notebook_TEST.py
-   
+
 2. add any feature there and *thouroughly* test them
 3. when you are happy with the results, copy them over into the production notebook and
    save.
- 
+
 .. warning::
 
     Live editing of correction notebooks is fully at your responsiblity. Do not do it
     if you are not 100% sure you know what you are doing.
-    
-4. If it fails, revert back to the original state, ideally via git:: 
 
-       git checkout HEAD -- production_notebook_NBC.py 
+4. If it fails, revert back to the original state, ideally via git::
+
+       git checkout HEAD -- production_notebook_NBC.py
 
 5. Any runs which did not correct do to failures of the live edit can then be relaunched
    manually, assuming the correction notebook allows run and overwrite paramters::
-   
+
        xfel-calibrate ...... --run XYZ,ZXY-YYS --overwrite
-  
-  
+
+
 Using a Parameter Generator Function
 ------------------------------------
 
 By default, the parameters to be exposed to the command line are deduced from the
-first code cell of the notebook, after resolving the notebook itself from the 
+first code cell of the notebook, after resolving the notebook itself from the
 detector and characterization type. For some applications it might be beneficial
 to define a context-specific parameter range within the same notebook, based on
 additional user input. This can be done via a parameter generation function which
@@ -82,7 +82,7 @@ is defined in one of the code cell::
         existing = set()
         def extract_parms(cls):
             args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
-            pList = []        
+            pList = []
             for i, arg in enumerate(args[1:][::-1]):
                 if arg in existing:
                     continue
@@ -90,7 +90,7 @@ is defined in one of the code cell::
                 existing.add(arg)
 
                 if i < len(defaults):
-                    default = defaults[::-1][i]                
+                    default = defaults[::-1][i]
                     if str(default).isdigit():
                         pList.append("{} = {}".format(arg, default))
                     elif default is None or default == "None":
@@ -108,21 +108,21 @@ is defined in one of the code cell::
                 parms = extract_parms(getattr(condition, dtype))
                 [all_conditions.add(p) for p in parms]
         return "\n".join(all_conditions)
-        
+
 
 .. note::
 
    Note how all imports are inlined, as the function is executed outside the
    notebook context.
-       
+
 In the example, the function generates a list of additional parameters depending
 on the `detector_instance` given. Here, `detector_instance` is defined in the first
-code cell the usual way. Any other parameters defined such, that have names matching 
+code cell the usual way. Any other parameters defined such, that have names matching
 those of the generator function signature are passed to this function. The function
 should then return a string containing additional code to be appended to the first
 code cell.
 
-To make use of this functionality, the parameter generator function needs to be 
+To make use of this functionality, the parameter generator function needs to be
 configured in `notebooks.py`, e.g. ::
 
     ...
@@ -136,7 +136,7 @@ configured in `notebooks.py`, e.g. ::
         },
     }
     ...
-       
+
 To generically query which parameters are defined in the first code cell, the
 code execution history feature of iPython can be used::
 
@@ -156,6 +156,6 @@ code execution history feature of iPython can be used::
             parms[n] = str(v) if not isinstance(v, str) else v
         if parms[n] == "None" or parms[n] == "'None'":
             parms[n] = None
-                      
+
 This will create a dictionary `parms` which contains all parameters either
-as `float` or `str` values.
\ No newline at end of file
+as `float` or `str` values.
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 23c008eba4f6ed9c3cf63b4d03b4cb01668b2c73..693a632c479a8138ec0597535dd386458686d595 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -385,7 +385,7 @@ from nbconvert import RSTExporter
 from xfel_calibrate import notebooks
 
 rst_exporter = RSTExporter()
-    
+
 with open("available_notebooks.rst", "w") as f:
     f.write(dedent("""
             .. _available_notebooks:
@@ -395,8 +395,8 @@ with open("available_notebooks.rst", "w") as f:
 
             The following notebooks are currently integrated into the European XFEL
             Offline Calibration tool chain.
-            
-            
+
+
             """))
 
     for detector in sorted(notebooks.notebooks.keys()):
@@ -404,10 +404,10 @@ with open("available_notebooks.rst", "w") as f:
         f.write("{}\n".format(detector))
         f.write("{}\n".format("-"*len(detector)))
         f.write("\n")
-        
+
         for caltype in sorted(values.keys()):
             data = values[caltype]
-            
+
             nbpath = os.path.abspath("{}/../../../{}".format(__file__, data["notebook"]))
             with open(nbpath, "r") as nf:
                 nb = nbformat.read(nf, as_version=4)
@@ -419,16 +419,16 @@ with open("available_notebooks.rst", "w") as f:
                 nb.cells = [mdcell]  # we only want this single cell
                 body, _ = rst_exporter.from_notebook_node(nb)
                 adjusted = []
-                # adjust titles 
+                # adjust titles
                 for line in body.split("\n"):
                     if line.startswith("=="):
                         line = line.replace("=", "+")
                     if line.startswith("--"):
                         line = line.replace("-", "~")
                     adjusted.append(line)
-                f.write("\n".join(adjusted))                
+                f.write("\n".join(adjusted))
                 f.write("\n")
-            
+
             f.write("To invoke this notebook and display help use:\n\n")
             f.write(".. code-block:: bash\n\n")
             f.write("    xfel-calibrate {} {} --help\n\n".format(detector, caltype))
@@ -461,18 +461,18 @@ def xml_to_rst_report(xml, git_tag, reports=[]):
     rst[-1] = rst[-1].format(test_name=test_name, ex_date=ex_date)
     rst += ["="*len(rst[-1])]
     rst += [""]
-    
+
     num_tests = e.get("tests")
     num_err = int(e.get("errors"))
     num_fail = int(e.get("failures"))
     num_skip = int(e.get("skipped"))
-    
+
     # create a summary header
     if num_err + num_fail == 0:
         rst += [":header-passed:`✓`"]
     else:
         rst += [":header-failed:`❌`"]
-        
+
     if num_skip > 0:
         rst[-1] += ":header-skipped:`âš `"
     rst += [""]
@@ -487,12 +487,12 @@ def xml_to_rst_report(xml, git_tag, reports=[]):
     for rname, rpath in reports:
         rst += [":Report: `{} <{}>`_".format(rname, rpath)]
     rst += [""]
-    
+
     # now the details
     rst += ["Detailed Results"]
     rst += ["-"*len(rst[-1])]
     rst += [""]
-    
+
     detailed_failures = []
     rows = []
     for child in e:
@@ -515,12 +515,12 @@ def xml_to_rst_report(xml, git_tag, reports=[]):
         msg = "\n".join(textwrap.wrap(msg, 20))
         row = [status, name, etype, msg, extime ]
         rows.append(row)
-    
+
     header = ["Result", "Test", "Error", "Message", "Duration (s)"]
     tblrst =  tabulate.tabulate(rows, headers=header, tablefmt="rst")
     rst += tblrst.split("\n")
     rst += [""]
-    
+
     for test, report in detailed_failures:
         rst += ["Failure report for: {}".format(test)]
         rst += ["~"*len(rst[-1])]
@@ -528,15 +528,15 @@ def xml_to_rst_report(xml, git_tag, reports=[]):
         rst += [".. code-block:: python"]
         rst += textwrap.indent(report, " "*4).split("\n")
         rst += [""]
-    
+
     do_console = False
     for child in e:
         if child.tag == "system-out" and len(child.text.strip()):
             do_console = True
             break
-    
+
     if do_console:
-    
+
         # console output
         rst += ["Console Output"]
         rst += ["-"*len(rst[-1])]
@@ -549,7 +549,7 @@ def xml_to_rst_report(xml, git_tag, reports=[]):
             rst += [".. code-block:: console"]
             rst += textwrap.indent(child.text, " "*4).split("\n")
 
-        
+
     return "\n".join(rst)
 
 def sorted_dir(folder):
@@ -570,12 +570,12 @@ Contents:
 
 .. toctree::
    :maxdepth: 2
-   
+
 
 """
 if not os.path.exists("./test_rsts"):
     os.makedirs("./test_rsts")
-    
+
 with open("test_results.rst", "w") as f:
     f.write(header)
     for commit, modtime in sorted_dir(test_artefact_dir):
@@ -586,7 +586,7 @@ with open("test_results.rst", "w") as f:
             rst += ["+"*len(rst[-1])]
             rst += [""]
             fr.write("\n".join(rst))
-            
+
             # copy reports
             pdfs = glob.glob("{}/{}/*/*.pdf".format(test_artefact_dir, commit))
             if not os.path.exists("./_static/reports/{}".format(commit)):
@@ -600,7 +600,7 @@ with open("test_results.rst", "w") as f:
                 rname = os.path.basename(pdf).split(".")[0]
                 rlist.append((rname, "../_static/reports/{}".format(ppath)))
                 reports[rloc] = rlist
-            
+
             xmls = glob.glob("{}/{}/*/TEST*.xml".format(test_artefact_dir, commit))
             for xml in xmls:
                 rloc = xml.split("/")[-2]
diff --git a/docs/source/configuration.rst b/docs/source/configuration.rst
index 8db9435689f61adef714f01daa23725a1204be7e..241070cbd51c9e0eab655e9f8920c5878c1db17c 100644
--- a/docs/source/configuration.rst
+++ b/docs/source/configuration.rst
@@ -28,7 +28,7 @@ python file of the form::
 
     # the command to run this concurrently. It is prepended to the actual call
     launcher_command = "sbatch -p exfel -t 24:00:00 --mem 500G --mail-type END --requeue --output {temp_path}/slurm-%j.out"
-    
+
 A comment is given for the meaning of each configuration parameter.
 
 
@@ -62,11 +62,11 @@ The configuration is to be given in form of a python directory::
              ...
          }
      }
-     
+
 The first key is the detector that the calibration may be used for, here AGIPD. The second
 key level gives the name of the task being performed (here: DARK and PC). For each of these
 entries, a path to the notebook and a concurrency hint should be given. In the concurrency
-hint the first entry specifies which parameter of the notebook expects a list whose integer 
+hint the first entry specifies which parameter of the notebook expects a list whose integer
 entries, can be concurrently run (here "modules"). The second parameter state with which range
 to fill this parameter if it is not given by the user. In the example a `range(16):=0,1,2,...15`
 would be passed onto the notebook, which is run as 16 concurrent jobs, each processing one module.
@@ -75,9 +75,9 @@ be derived e.g. by profiling memory usage per core, run times, etc.
 
 .. note::
 
-    It is good practice to name command line enabled notebooks with an `_NBC` suffix as shown in 
+    It is good practice to name command line enabled notebooks with an `_NBC` suffix as shown in
     the above example.
-    
+
 The `CORRECT` notebook (last notebook in the example) makes use of a concurrency generating function
 by setting the `use function` parameter. This function must be defined in a code cell in the notebook,
 its parameters should be named like other exposed parameters. It should return a list of of parameters
@@ -99,13 +99,13 @@ function::
                                                        len(seq_nums)//sequences_per_node+1)]
         else:
             return sequences
-         
-         
+
+
 .. note::
 
     Note how imports are inlined in the definition. This is necessary, as only the function code,
     not the entire notebook is executed.
-            
+
 which requires as exposed parameters e.g. ::
 
     in_folder = "/gpfs/exfel/exp/SPB/201701/p002038/raw/" # the folder to read data from, required
diff --git a/docs/source/how_it_works.rst b/docs/source/how_it_works.rst
index d8246c1883afd9fa8fef9bddeb911b913ab8b156..090ebae9fc97591f86af5b71cac651b555faa63d 100644
--- a/docs/source/how_it_works.rst
+++ b/docs/source/how_it_works.rst
@@ -8,14 +8,14 @@ to expose Jupyter_ notebooks to a command line interface. In the process reports
 from these notebooks. The general interface is::
 
     % xfel-calibrate DETECTOR TYPE
-    
+
 where `DETECTOR` and `TYPE` specify the task to be performed.
 
 Additionally, it leverages the DESY/XFEL Maxwell cluster to run these jobs in parallel
 via SLURM_.
 
 Here is a list of :ref:`available_notebooks`. See the :ref:`advanced_topics` if you are
-for details on how to use as detector group staff. 
+for details on how to use as detector group staff.
 
 If you would like to integrate additional notebooks please see the :ref:`development_workflow`.
 
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 06ef255aca0dc5fd2826611c375dc6d8191fe82f..5512e8a006aec326d32864ed11b2321248d428f3 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -10,7 +10,7 @@ Contents:
 
 .. toctree::
    :maxdepth: 2
-   
+
    how_it_works
    installation
    configuration
diff --git a/docs/source/installation.rst b/docs/source/installation.rst
index 38ef40719d5b0e048cca3ebb4edc9e5ee061901c..58f3f72268b460d4adaf297c0e6ede83ba2a0a1d 100644
--- a/docs/source/installation.rst
+++ b/docs/source/installation.rst
@@ -35,9 +35,9 @@ Installation using Anaconda
 
 First you need to load the anaconda/3 environment through::
 
-    1. module load anaconda/3 
+    1. module load anaconda/3
 
-If installing into other python enviroments, this step can be skipped. 
+If installing into other python enviroments, this step can be skipped.
 
 Then the package for the offline calibration can be obtained from the git repository::
 
@@ -75,14 +75,14 @@ folder to match your environment.
 The tool-chain is then available via the::
 
     xfel-calibrate
-    
+
 command.
 
 
 Installation using karabo
 +++++++++++++++++++++++++
 
-If required, one can install into karabo environment. The difference would be to 
+If required, one can install into karabo environment. The difference would be to
 first source activate the karabo envrionment::
 
     1. source karabo/activate
@@ -94,8 +94,8 @@ then after cloning the offline calibration package from git, the requirements ca
 Development Installation
 ------------------------
 
-For a development installation in your home directory, which automatically 
-picks up (most) changes, first install the dependencies as above, 
+For a development installation in your home directory, which automatically
+picks up (most) changes, first install the dependencies as above,
 but then install the tool-chain separately in development mode::
 
    pip install -e . --user
@@ -107,14 +107,12 @@ but then install the tool-chain separately in development mode::
 Installation of New Notebooks
 -----------------------------
 
-To install new, previously untracked notebooks in the home directory, 
-repeat the installation of the the tool-chain, without requirments, 
+To install new, previously untracked notebooks in the home directory,
+repeat the installation of the the tool-chain, without requirments,
 from the package base directory::
 
     pip install --upgrade . --user
-    
+
 Or, in case you are actively developing::
 
     pip install -e . --user
-
-
diff --git a/docs/source/makeAllDocs.sh b/docs/source/makeAllDocs.sh
index 402e2e603b579827c4eae5846a9f1b2e395382fd..a54263b5de157ad92301f687860a6512486c182f 100755
--- a/docs/source/makeAllDocs.sh
+++ b/docs/source/makeAllDocs.sh
@@ -35,4 +35,3 @@ done
 rm *.bak
 
 #cd .. rm api/* sphinx-apidoc -o ./api/ -E ../../iCalibrationDB/
-
diff --git a/docs/source/test_rsts/roles.rst b/docs/source/test_rsts/roles.rst
index 525c6f0af4aa5a93870f59ef90957cb432598a3c..d153c86059f561e86c47fcdd864a2972373b38f7 100644
--- a/docs/source/test_rsts/roles.rst
+++ b/docs/source/test_rsts/roles.rst
@@ -3,4 +3,4 @@
 .. role:: header-failed
 .. role:: passed
 .. role:: skipped
-.. role:: failed
\ No newline at end of file
+.. role:: failed
diff --git a/docs/source/testing.rst b/docs/source/testing.rst
index 214248bf4b7da4a83b160e3e153f5d79221f0ccb..4f9ad478f3a3ec19e7da29e789bd90059f69ec53 100644
--- a/docs/source/testing.rst
+++ b/docs/source/testing.rst
@@ -23,7 +23,7 @@ run can be assigned to that commit::
 To run all tests, navigate to the test directory and execute::
 
     python -m unittest discover
-    
+
 This will usually entail executing a notebook under test via SLURM
 first, then checking its output against the last commited artefacts
 of that test type.
@@ -32,7 +32,7 @@ If individual tests are run, e.g. for debugging, additional options
 exist to skip tests, or notebook execution::
 
    python test_XXX.py --help
-   
+
 where `test_XXX.py` is the test name, will give you a list of options
 available for that test.
 
@@ -44,7 +44,7 @@ generate new artefacts.
 
     Running tests will generate entries for test reports in the
     artefacts directory under the most recent commit.
-    Reviewers should check that such updates are present in the 
+    Reviewers should check that such updates are present in the
     list of changed files.
 
 
@@ -64,7 +64,7 @@ Contrary to running tests alone, new artefacts need to be generated
 for each affected test individually::
 
     python test_XXX.py --generate
-    
+
 replacing `test_XXX.py` with the test you'd like to run. This
 will execute the notebook, create artefact entries in the artefact
 dir, and after that will check for consistency by executing the test against
@@ -76,15 +76,15 @@ commit the new artefacts and create a merge request for your branch::
 
     git add tests/artefacts/
     git commit -m "Added new artefacts for changes related to baseline shifts"
-    
+
 Please also add comments in the MR description on why artefacts have
 changed.
 
 .. note::
 
-    Reviewers should always evaluate if the changes in test artefacts are 
+    Reviewers should always evaluate if the changes in test artefacts are
     appropriate, intended and acceptable.
-    
+
 Test Reports
 ++++++++++++
 
@@ -114,4 +114,3 @@ Repositories of calibration constants used in testing can be found at::
     /gpfs/exfel/exp/XMPL/201750/p700001/usr
 
 .. include:: test_results.rst
-
diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst
index 89607fdb77bb3637f7c42f0aee746d8470a6a5ed..609d97bf6119c8ef034eede79a943f7a2a88202e 100644
--- a/docs/source/tutorial.rst
+++ b/docs/source/tutorial.rst
@@ -17,7 +17,7 @@ The Tutorial consist of this documentation and two very simple notebooks:
    calibration tool-chain.
 
 To have a look at those notebooks start from a shell with the karabo environment::
-  
+
   jupyter-notebook
 
 This will open a jupyter kernel running in your browser where you can then open the notebooks in the folder notebooks/Tutorial. If you in addition also start on another shell the ipcluster as instructed in the calversion.ipynb notebook::
@@ -50,14 +50,14 @@ to install the necessary packages and setup the environment:
 
      ./karabo-2.2.4-Release-CentOS-7-x86_64.sh
 
-     source karabo/activate 
+     source karabo/activate
 
 3. Get the package pycalibration which contains the offline calibration tool-chain::
-  
+
      git clone https://git.xfel.eu/gitlab/detectors/pycalibration.git
 
 4. Install the necessary requirements and the package itself::
- 
+
      cd pycalibration
      pip install -r requirements.txt .
 
diff --git a/docs/source/workflow.rst b/docs/source/workflow.rst
index 8b7d4744a77a5db2b897c51ddfcae3b0a2a42a3f..0b9f0c6ff2d8870b18b0f85959c62d548f1c0ca5 100644
--- a/docs/source/workflow.rst
+++ b/docs/source/workflow.rst
@@ -9,31 +9,31 @@ when developing new offline calibration tools.
 Fresh Start
 -----------
 
-If you are starting a blank notebook from scratch you should first 
+If you are starting a blank notebook from scratch, you should first
 think about a few preconsiderations:
 
-* Will the notebook performan a headless task, or will it also be 
-  an important interface for evaluating the results in form of a 
+* Will the notebook perform a headless task, or will it also be
+  an important interface for evaluating the results in the form of a
   report.
 * Do you need to run concurrently? Is concurrency handled internally,
-  e.g. by use of ipcluster, or also on a host level, using cluster 
+  e.g. by use of ipcluster, or also on a host level, using cluster
   computing via slurm.
 
 In case you plan on using the notebook as a report tool, you should make
 sure to provide sufficient guidance and textual details using e.g. markdown
-cells in the notebook. You should also structure it into appropriate 
+cells in the notebook. You should also structure it into appropriate
 subsections.
 
 If you plan on running concurrently on the cluster, identify which variable
-should be mapped to concurent runs. For autofilling it an integer list is 
+should be mapped to concurrent runs. For autofilling, an integer list is
 needed.
 
 Once you've clarified the above points, you should create a new notebook,
-either in an existing detector folder, or if for a yet not integrated 
+either in an existing detector folder or, for a not yet integrated
 detector, into a new folder with the detector's name. Give it a suffix
 `_NBC` to denote that it is enabled for the tool chain.
 
-You should then start writing your code following the guidelines 
+You should then start writing your code following the guidelines
 below.
 
 
@@ -41,10 +41,10 @@ From Existing Notebook
 ----------------------
 
 Copy your existing notebook into the appropriate detector directory,
-or create a new one if the detector does not exist yet. Give the copy 
-a suffix `_NBC` to denote that it is enabled for the tool chain. 
+or create a new one if the detector does not exist yet. Give the copy
+a suffix `_NBC` to denote that it is enabled for the tool chain.
 
-You should then start restructuring your code following the guidelines 
+You should then start restructuring your code following the guidelines
 below.
 
 Title and Author Information
@@ -55,11 +55,11 @@ author and version. These should be given in a leading markdown cell in
 the form::
 
     # My Fancy Calculation #
-    
+
     Author: Jane Doe, Version 0.1
-    
+
     A description of the notebook.
-    
+
 Information in the format will allow automatic parsing of author and version.
 
 
@@ -91,7 +91,7 @@ required::
     sequences = [0,1,2,3,4] # sequences files to use, range allowed
     cluster_profile = "noDB" # The ipcluster profile to use
     local_output = False # output constants locally
-    
+
 Here, `in_folder` and `out_folder` are required string values. Values for required parameters have to be given when executing from the command line. This means that any defaults given in the first cell of the code are ignored (they are only used to derive the type of the parameter). `Modules` is a list, which from the command line could also be assigned using a range expression, e.g. `5-10,12,13,18-21`, which would translate to `5,6,7,8,9,12,13,18,19,20`. It is also a required parameter. The parameter `local_output` is a Boolean. The corresponding argument given in the command line will change this parameter from `false` to `True`. There is no way to change this parameter from `True` to `False` from the command line.
 
 The `cluster_profile` parameter is a bit special, in that the tool kit expects exactly this
@@ -124,10 +124,10 @@ to the following parameters being exposed via the command line::
       --no-cluster-job      Do not run as a cluster job
       --report-to str       Filename (and optionally path) for output report
       --modules str [str ...]
-                            modules to work on, required, range allowed. 
+                            modules to work on, required, range allowed.
                             Default: [0]
       --sequences str [str ...]
-                            sequences files to use, range allowed. 
+                            sequences files to use, range allowed.
                             Default: [0, 1, 2, 3, 4]
       --cluster-profile str
                             The ipcluster profile to use. Default: noDB2
@@ -135,7 +135,7 @@ to the following parameters being exposed via the command line::
       --local-output        output constants locally. Default: False
 
     ...
-    
+
 
 .. note::
 
@@ -184,9 +184,9 @@ wanting to run the tool will need to install these requirements as well. Thus,
 
 * keep runtimes and library requirements in mind. A library doing its own parallelism either
   needs to programatically be able to set this up, or automatically do so. If you need to
-  start something from the command line first, things might be tricky as you will likely 
+  start something from the command line first, things might be tricky as you will likely
   need to run this via `POpen` commands with appropriate environment variable.
-  
+
 Writing out data
 ~~~~~~~~~~~~~~~~
 
@@ -197,7 +197,7 @@ possibly done later on in a notebook.
 Also consider using HDF5 via h5py_ as your output format. If you correct or calibrated
 input data, which adhears to the XFEL naming convention, you should maintain the convention
 in your output data. You should not touch any data that you do not actively work on and
-should assure that the `INDEX` and identifier entries are syncronized with respect to 
+should ensure that the `INDEX` and identifier entries are synchronized with respect to
 your output data. E.g. if you remove pulses from a train, the `INDEX/.../count` section
 should reflect this.
 
@@ -205,8 +205,8 @@ Finally, XFEL RAW data can contain filler data from the DAQ. One possible way of
 this data is the following::
 
     datapath = "/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/cellId".format(channel)
-    
-    count = np.squeeze(infile[datapath])        
+
+    count = np.squeeze(infile[datapath])
     first = np.squeeze(infile[datapath])
     if np.count_nonzero(count != 0) == 0:  # filler data has counts of 0
         print("File {} has no valid counts".format(infile))
@@ -215,14 +215,14 @@ this data is the following::
     idxtrains = np.squeeze(infile["/INDEX/trainId"])
     medianTrain = np.nanmedian(idxtrains)  # protect against freak train ids
     valid &= (idxtrains > medianTrain - 1e4) & (idxtrains < medianTrain + 1e4)
-    
+
     # index ranges in which non-filler data exists
     last_index = int(first[valid][-1]+count[valid][-1])
     first_index = int(first[valid][0])
-    
+
     # access these indices
     cellIds = np.squeeze(np.array(infile[datapath][first_index:last_index, ...]))
-    
+
 
 Plotting
 ~~~~~~~~
@@ -243,10 +243,10 @@ Calibration Database Interaction
 --------------------------------
 
 Tasks which require calibration constants or produce such should do this by interacting with
-the European XFEL calibration database. 
+the European XFEL calibration database.
 
 In terms of developement workflow it is usually easier to work with file-based I/O first and
-only switch over to the database after the algorithmic part of the notebook has matured. 
+only switch over to the database after the algorithmic part of the notebook has matured.
 Reasons for this include:
 
 * for developing against the database new constants will have to be integrated therein first
@@ -263,7 +263,7 @@ Testing
 
 The most important test is that your notebook completes flawlessy outside any special
 tool chain feature. After all, the tool chain will only replace parameters, and then
-launch a concurrent job and generate a report out of notebook. If it fails to run in the 
+launch a concurrent job and generate a report from the notebook. If it fails to run in the
 normal Jupyter notebook environment, it will certainly fail in the tool chain environment.
 
 Once you are satisfied with your current state of initial development, you can add it
@@ -273,7 +273,7 @@ Any changes you now make in the notebook will be automatically propagated to the
 Specifically, you should verify that all arguments are parsed correctly, e.g. by calling::
 
     xfel-calibrate DETECTOR NOTEBOOK_TYPE --help
-    
+
 From then on, check include if parallel slurm jobs are exectuted correctly and if a report
 is generated at the end.
 
@@ -298,4 +298,4 @@ documentation.
 .. _matplotlib: https://matplotlib.org/
 .. _numpy: http://www.numpy.org/
 .. _h5py: https://www.h5py.org/
-.. _iCalibrationDB: https://in.xfel.eu/readthedocs/docs/icalibrationdb/en/latest/
\ No newline at end of file
+.. _iCalibrationDB: https://in.xfel.eu/readthedocs/docs/icalibrationdb/en/latest/
diff --git a/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb b/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb
index 8fc4b3c22d69a3ae99b7eaa00cb676b0565f1c82..533d0f36e367668b26e86611cc63d272c5d75d5e 100644
--- a/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb
+++ b/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb
@@ -860,4 +860,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
\ No newline at end of file
+}
diff --git a/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb b/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
index 26671f7109241e39e9982ee26665064ca4765c2a..49c2e334f2632847528d2b53fe2991a56c69e5cc 100644
--- a/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
+++ b/notebooks/AGIPD/playground/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
@@ -1592,4 +1592,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 1
-}
\ No newline at end of file
+}
diff --git a/notebooks/AGIPD/playground/Chracterize_AGIPD_Gain_PC_mlearn.ipynb b/notebooks/AGIPD/playground/Chracterize_AGIPD_Gain_PC_mlearn.ipynb
index 4761a412a05f1a246b293fac8d19f000f07c3373..831dc4bd2145a533418e4e32a2bb358baba09dfd 100644
--- a/notebooks/AGIPD/playground/Chracterize_AGIPD_Gain_PC_mlearn.ipynb
+++ b/notebooks/AGIPD/playground/Chracterize_AGIPD_Gain_PC_mlearn.ipynb
@@ -607,4 +607,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
\ No newline at end of file
+}
diff --git a/notebooks/LPD/playground/correct_lpd_batch.py_old b/notebooks/LPD/playground/correct_lpd_batch.py_old
index 304a7ab7fbcad0ddacc663ebe1efb5c2d399e486..ef76d8d32c0bcde5bef3dbe04676a82942f03745 100644
--- a/notebooks/LPD/playground/correct_lpd_batch.py_old
+++ b/notebooks/LPD/playground/correct_lpd_batch.py_old
@@ -45,7 +45,7 @@ elif not overwrite:
 def combine_stack(d, sdim):
     combined = np.zeros((sdim, 2048,2048))
     combined[...] = np.nan
-    
+
     map_x = [1,0,0,1]
     map_y = [1,1,0,0]
     to_map = d
@@ -97,7 +97,7 @@ saveFile.close()
 
 # set everything up filewise
 from queue import Queue
-    
+
 def map_modules_from_files(filelist):
     module_files = {}
     mod_ids = {}
@@ -111,7 +111,7 @@ def map_modules_from_files(filelist):
             for file in filelist:
                 if file_infix in file:
                     module_files[name].put(file)
-                
+
     return module_files, mod_ids
 
 dirlist = os.listdir(in_folder)
@@ -120,14 +120,14 @@ for entry in dirlist:
     #only h5 file
     abs_entry = "{}/{}".format(in_folder, entry)
     if os.path.isfile(abs_entry) and os.path.splitext(abs_entry)[1] == ".h5":
-        
+
         if sequences is None:
             file_list.append(abs_entry)
         else:
             for seq in sequences:
                 if "{:05d}.h5".format(seq) in abs_entry:
                     file_list.append(os.path.abspath(abs_entry))
-                    
+
 mapped_files, mod_ids = map_modules_from_files(file_list)
 
 import copy
@@ -136,7 +136,7 @@ def correct_module(cells, inp):
     import numpy as np
     import copy
     import h5py
-    
+
     def splitOffGainLPD(d):
         msk = np.zeros(d.shape, np.uint16)
         msk[...] = 0b0000111111111111
@@ -145,9 +145,9 @@ def correct_module(cells, inp):
         gain = np.bitwise_and(d, msk)//4096
         gain[gain > 2] = 2
         return data, gain
-    
+
     if True:
-    
+
         filename, filename_out, channel, offset, rel_gain = inp
 
         infile = h5py.File(filename, "r", driver="core")
@@ -176,34 +176,34 @@ def correct_module(cells, inp):
 
         im, gain = splitOffGainLPD(im[:,0,...])
         im = im.astype(np.float32)
-        
+
         im = np.rollaxis(im, 2)
         im = np.rollaxis(im, 2, 1)
 
         gain = np.rollaxis(gain, 2)
         gain = np.rollaxis(gain, 2, 1)
 
-      
+
 
         om = offset
         rc = rel_gain
 
         for cc in range(im.shape[2]//cells):
             tg = gain[...,cc*cells:(cc+1)*cells]
-            
+
             offset = np.choose(tg, (om[...,0], om[...,1], om[...,2]))
             if rc is not None:
                 rel_cor = np.choose(tg, (rc[...,0], rc[...,1], rc[...,2]))
             tim = im[...,cc*cells:(cc+1)*cells]
             tim = tim - offset
-            if rc is not None:      
+            if rc is not None:
                 tim /= rel_cor
             im[...,cc*cells:(cc+1)*cells] = tim
 
         outfile["INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/data".format(channel)] = np.rollaxis(np.rollaxis(im,1), 2)
         outfile["INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/gain".format(channel)] = np.rollaxis(np.rollaxis(gain,1), 2)
         outfile.close()
-    
+
 
 done = False
 first_files = []
@@ -228,7 +228,7 @@ while not done:
                     rel_gains[i][...,:max_cells,:] if do_rel_gain else None))
     first = False
     p = partial(correct_module, max_cells)
-    
+
     r = view.map_sync(p, inp)
     done = all(dones)
 
@@ -239,7 +239,7 @@ for i, ff in enumerate(first_files):
     try:
         rf, cf = ff
         if rf is not None:
-        
+
             infile = h5py.File(rf, "r")
             raw.append(np.array(infile["/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/data".format(i)][max_cells*3:4*max_cells,0,...]))
             infile.close()
@@ -250,7 +250,7 @@ for i, ff in enumerate(first_files):
             infile.close()
         else:
             raise Exception("File not found")
-        
+
     except Exception as e:
         corrected.append(np.zeros((max_cells, 256, 256)))
         raw.append(np.zeros((max_cells, 256, 256)))
diff --git a/notebooks/LPD/playground/metroLib.py b/notebooks/LPD/playground/metroLib.py
index 0ef031095e14ec7bae0cd1b1b42497635f82194a..222a706f68a2d6b92aca4700a338413b09239ab3 100644
--- a/notebooks/LPD/playground/metroLib.py
+++ b/notebooks/LPD/playground/metroLib.py
@@ -7,28 +7,28 @@ from matplotlib import pylab as plt
 
 
 def getModulePosition(metrologyFile, moduleId):
-    """Position (in mm) of a module relative to the top left 
+    """Position (in mm) of a module relative to the top left
     corner of its quadrant. In case of tile-level positions,
-    the the position refers to the center of the top left 
+    the position refers to the center of the top left
     pixel.
-    
+
     Args
     ----
-    
+
     metrologyFile : str
         Fully qualified path and filename of the metrology file
     moduleId : str
         Identifier of the module in question (e.g. 'Q1M2T03')
-        
+
     Returns
     -------
-    
-    ndarray: 
+
+    ndarray:
         (x, y)-Position of the module in its quadrant
-    
+
     Raises
     ------
-    
+
     ValueError: In case the moduleId contains invalid module
         identifieres
     """
@@ -38,11 +38,11 @@ def getModulePosition(metrologyFile, moduleId):
     #
     #   QXMYTZZ
     #
-    # where X, Y, and Z are digits. Q denotes the quadrant 
-    # (X = 1, ..., 4), M the supermodule (Y = 1, ..., 4) and T 
+    # where X, Y, and Z are digits. Q denotes the quadrant
+    # (X = 1, ..., 4), M the supermodule (Y = 1, ..., 4) and T
     # the tile (Z = 1, ..., 16; with leading zeros).
     modulePattern = re.compile(r'[QMT]\d+')
-    # Give the module identifier Q1M1T01, the moduleList splits this 
+    # Given the module identifier Q1M1T01, the moduleList splits this
     # into the associated quadrant, supermodule, and tile identifiers:
     # >>> print(moduleList)
     # ['Q1', 'M1', 'T01']
@@ -53,7 +53,7 @@ def getModulePosition(metrologyFile, moduleId):
     # >>> print(h5Keys)
     # ['Q1', 'Q1/M1', 'Q1/M1/T01']
     h5Keys = ['/'.join(moduleList[:idx+1]) for idx in range(len(moduleList))]
-    
+
     # Every module of the detector gives its position relative to
     # the top left corner of its parent structure. Every position
     # is stored in the positions array
@@ -83,17 +83,17 @@ def getModulePosition(metrologyFile, moduleId):
 def translateToModuleBL(tilePositions):
     """Tile coordinates within a supermodule with the
     origin in the bottom left corner.
-    
+
     Parameters
     ----------
-    
+
     tilePositions : ndarray
-        Tile positions as retrieved from the LPD metrology 
+        Tile positions as retrieved from the LPD metrology
         file. Must have shape (16, 2)
-        
+
     Returns
     -------
-    
+
     ndarray
         Tile positions relative to the bottom left corner.
     """
@@ -115,7 +115,7 @@ def translateToModuleBL(tilePositions):
     # In the clockwise order of LPD tiles, the 8th
     # tile in the list is the bottom left tile
     bottomLeft8th = np.asarray([0., moduleCoords[8][1]])
-    # Translate coordinates to the bottom left corner 
+    # Translate coordinates to the bottom left corner
     # of the bottom left tile
     bottomLeft = moduleCoords - bottomLeft8th
     return bottomLeft
@@ -124,44 +124,44 @@ def translateToModuleBL(tilePositions):
 def plotSupermoduleData(tileData, metrologyPositions, zoom=1., vmin=100., vmax=6000.):
     """Plots data of a supermodule with tile positions
     determined by the metrology data.
-    
+
     Parameters
     ----------
-    
+
     tileData : ndarray
-        Supermodule image data separated in individual tiles. 
+        Supermodule image data separated in individual tiles.
         Must have shape (16, 32, 128).
-        
-    metrologyPositions : ndarray 
-        Tile positions as retrieved from the metrology file. 
+
+    metrologyPositions : ndarray
+        Tile positions as retrieved from the metrology file.
         Must have shape (16, 2)
-        
+
     zoom : float, optional
         Can enlarge or decrease the size of the plot. Default = 1.
-        
+
     vmin, vmax : float, optional
         Value range. Default vmin=100., vmax=6000.
-        
+
     Returns
     -------
     matplotlib.Figure
-        Figure object containing the supermodule image        
+        Figure object containing the supermodule image
     """
     # Data needs to have 16 tiles, each with
     # 32x128 pixels
     assert tileData.shape == (16, 32, 128)
-    
+
     # Conversion coefficient, required since
     # matplotlib does its business in inches
     mmToInch = 1./25.4 # inch/mm
-    
+
     # Some constants
     numberOfTiles = 16
     numberOfRows = 8
     numberOfCols = 2
     tileWidth = 65.7 # in mm
     tileHeight = 17.7 # in mm
-    
+
     # Base width and height are given by spatial
     # extend of the modules. The constants 3.4 and 1
     # are estimated as a best guess for gaps between
@@ -169,26 +169,26 @@ def plotSupermoduleData(tileData, metrologyPositions, zoom=1., vmin=100., vmax=6
     figureWidth = zoom * numberOfCols*(tileWidth + 3.4)*mmToInch
     figureHeight = zoom * numberOfRows*(tileHeight + 1.)*mmToInch
     fig = plt.figure(figsize=(figureWidth, figureHeight))
-    
-    # The metrology file references module positions 
+
+    # The metrology file references module positions
     bottomRightCornerCoordinates = translateToModuleBL(metrologyPositions)
-    
+
     # The offset here accounts for the fact that there
     # might be negative x,y values
     offset = np.asarray(
-        [min(bottomRightCornerCoordinates[:, 0]), 
+        [min(bottomRightCornerCoordinates[:, 0]),
          min(bottomRightCornerCoordinates[:, 1])]
     )
-    
+
     # Account for blank borders in the plot
     borderLeft = 0.5 * mmToInch
     borderBottom = 0.5 * mmToInch
-    
+
     # The height and width of the plot remain
     # constant for a given supermodule
     width = zoom * 65.7 * mmToInch / (figureWidth - 2.*borderLeft)
     height = zoom * 17.7 * mmToInch / (figureHeight - 2.*borderBottom)
-    
+
     for i in range(numberOfTiles):
         # This is the top left corner of the tile with
         # respect to the top left corner of the supermodule
@@ -200,38 +200,38 @@ def plotSupermoduleData(tileData, metrologyPositions, zoom=1., vmin=100., vmax=6
         ax = fig.add_axes((ax0, ay0, width, height), frameon=False)
         # Do not display axes, tick markers or labels
         ax.tick_params(
-            axis='both', left='off', top='off', right='off', bottom='off', 
+            axis='both', left='off', top='off', right='off', bottom='off',
             labelleft='off', labeltop='off', labelright='off', labelbottom='off'
         )
         # Plot the image. If one wanted to have a colorbar
         # the img object would be needed to produce one
         img = ax.imshow(
-            tileData[i], 
-            interpolation='nearest', 
+            tileData[i],
+            interpolation='nearest',
             vmin=vmin, vmax=vmax
         )
-        
+
     return fig
 
 
 def splitChannelDataIntoTiles(channelData, clockwiseOrder=False):
     """Splits the raw channel data into indiviual tiles
-    
+
     Args
     ----
-    
+
     channelData : ndarray
         Raw channel data. Must have shape (256, 256)
-        
+
     clockwiseOrder : bool, optional
         If set to True, the sequence of tiles is given
         in the clockwise order starting with the top
         right tile (LPD standard). If set to false, tile
         data is returned in reading order
-        
+
     Returns
     -------
-    
+
     ndarray
         Same data, but reshaped into (12, 32, 128)
     """
@@ -240,8 +240,8 @@ def splitChannelDataIntoTiles(channelData, clockwiseOrder=False):
     orderedTiles = tiles.reshape(16, 32, 128)
     if clockwiseOrder:
         # Naturally, the tile data after splitting is in reading
-        # order (i.e. top left tile is first, top right tile is second, 
-        # etc.). The official LPD tile order however is clockwise, 
+        # order (i.e. top left tile is first, top right tile is second,
+        # etc.). The official LPD tile order however is clockwise,
         # starting with the top right tile. The following array
         # contains indices of tiles in reading order as they would
         # be iterated in clockwise order (starting from the top right)
@@ -253,22 +253,22 @@ def splitChannelDataIntoTiles(channelData, clockwiseOrder=False):
 
 def splitChannelDataIntoTiles2(channelData, clockwiseOrder=False):
     """Splits the raw channel data into indiviual tiles
-    
+
     Args
     ----
-    
+
     channelData : ndarray
         Raw channel data. Must have shape (256, 256)
-        
+
     clockwiseOrder : bool, optional
         If set to True, the sequence of tiles is given
         in the clockwise order starting with the top
         right tile (LPD standard). If set to false, tile
         data is returned in reading order
-        
+
     Returns
     -------
-    
+
     ndarray
         Same data, but reshaped into (12, 32, 128)
     """
@@ -277,8 +277,8 @@ def splitChannelDataIntoTiles2(channelData, clockwiseOrder=False):
     orderedTiles = np.moveaxis(tiles.reshape(16, 128, 32, channelData.shape[2]), 2, 1)
     if clockwiseOrder:
         # Naturally, the tile data after splitting is in reading
-        # order (i.e. top left tile is first, top right tile is second, 
-        # etc.). The official LPD tile order however is clockwise, 
+        # order (i.e. top left tile is first, top right tile is second,
+        # etc.). The official LPD tile order however is clockwise,
         # starting with the top right tile. The following array
         # contains indices of tiles in reading order as they would
         # be iterated in clockwise order (starting from the top right)
@@ -294,7 +294,7 @@ def returnPositioned2(geometry_file, modules, dquads):
     tile_order = [1, 2, 3, 4]
     cells = 0
     for sm, mn in modules:
-        
+
         position = np.asarray([getModulePosition(geometry_file,
                                                  'Q{}/M{:d}/T{:02d}'.format(
                                                      sm//4+1,
@@ -357,7 +357,7 @@ def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all')
     data = {}
     for file in files:
         ch = int(re.findall(r'.*-{}([0-9]+)-.*'.format(detector), file)[0])
-        
+
         try:
             with h5py.File(file, 'r', driver='core') as f:
                 d = np.squeeze(f[datapath.format(ch)][()] if nImages == 'all' else f[datapath.format(ch)][:nImages,:,:])
diff --git a/notebooks/pnCCD/frm6reader.py b/notebooks/pnCCD/frm6reader.py
index a0539603fbd1c64534475daff511d39ffc20dea7..d853a9cadd5e715c4ed21ccbdd4d1bb065317697 100644
--- a/notebooks/pnCCD/frm6reader.py
+++ b/notebooks/pnCCD/frm6reader.py
@@ -331,4 +331,3 @@ class Frms6Reader(object):
             numberOfFrames = int(numberOfFrames)
 
         return (frameWidth, frameHeight, numberOfFrames)
-
diff --git a/reportservice/README.md b/reportservice/README.md
index a4e6da44a3fe67bf39f6e0622de0cd1614314d1b..ccf7f91bab188ec10bffa1638a4fa61b9238aa4c 100644
--- a/reportservice/README.md
+++ b/reportservice/README.md
@@ -4,7 +4,7 @@ Offline Calibration Reportservice
 The Reportserivce is a service responsible for handling requests (manual or automatic triggers)
 for generating the DetectorCharacterization reports based on the requested configurations.
 
-The Reportservice mainly consists of a service, clients and YAML configuration. 
+The Reportservice mainly consists of a service, clients and YAML configuration.
 The service keeps on listening to any ZMQ requests with a given configurations.
 Then based on these configurations, it produces slurm jobs (through xfel-calibrate command line) to generate *.png plots of calibration configurations over time.
 
@@ -39,7 +39,7 @@ and it should generate a very generalized DC report for the available detectors
 
 *local* is the mode used for generating figures locally without uploading the DC report on RTD or pushing figures
 to the git repository, rather generated figures are copied to the local repository and depending on the
-given report-fmt(report format) argument an html or a pdf is generated in doc/_build/ 
+given report-fmt (report format) argument, an html or a pdf is generated in doc/_build/
 of the report service out folder (repo-local).
 
 *sim* is a simulation mode, which is mostly used for debugging purposes and tool development without generating any reports locally or over RTD.
@@ -116,9 +116,9 @@ Automatic Launch:
 Manual Launch:
 
     This manual launch script is currently used for debugging purposes, only.
-    
+
     The available command line arguments are:
-    
+
 * --config-file: The path for the configuration file
 * --instrument: A selected list of instruments to generate a report for. This instrument must be in the report_conf.yaml. The default for this argument is ['all]
 * --overwrite-conf: A bool for indicating a new report configuration file(conf-file) should be sent instead of the default report_conf.yaml,
diff --git a/reportservice/automatic_run.py b/reportservice/automatic_run.py
index 708037153f3b84f3d821f5d0814d24973b797504..dc3b7156416ba9d57428d1f7c4f42a8b55a2bc96 100644
--- a/reportservice/automatic_run.py
+++ b/reportservice/automatic_run.py
@@ -53,13 +53,13 @@ async def auto_run(cfg, timeout=3000):
 
             tidx = tidx + 1 if tidx != len(run_time)-1 else 0
 
-        # check every 10mins, if there is 
+        # check every 10mins, if there is
         # a need for an automatic-run.
         await asyncio.sleep(3000)
 
 
 arg_parser = argparse.ArgumentParser(description='Automatic Launch')
-arg_parser.add_argument('--config-file', type=str, 
+arg_parser.add_argument('--config-file', type=str,
                         default='./report_conf.yaml',
                         help='config file path with reportservice port. '
                              'Default=./report_conf.yaml')
diff --git a/reportservice/manual_run.py b/reportservice/manual_run.py
index caa04fcf65a2a3e0449c30ef1d1de8b58c8f48b7..df7ab40f7bf1d7c8323ad021c83ce19d59a35e28 100644
--- a/reportservice/manual_run.py
+++ b/reportservice/manual_run.py
@@ -60,7 +60,7 @@ arg_parser.add_argument('--report-fmt', default='html',
                              'Note: THIS HAS NO EFFECT IN PROD AND SIM MODES!')
 arg_parser.add_argument('--log-file', type=str, default='./report.log',
                         help='The report log file path. Default=./report.log')
-arg_parser.add_argument('--logging', type=str, default="INFO", 
+arg_parser.add_argument('--logging', type=str, default="INFO",
                         help='logging modes: INFO, DEBUG or ERROR. '
                              'Default=INFO',
                         choices=['INFO', 'DEBUG', 'ERROR'])
diff --git a/reportservice/messages.py b/reportservice/messages.py
index 901795b5c8bcaafc84bc8b4ec2e53cd3d19ef830..a4723c1628d4522ec661a87ea2c7863cce9af50b 100644
--- a/reportservice/messages.py
+++ b/reportservice/messages.py
@@ -1,5 +1,3 @@
 class Errors:
     REQUEST_MALFORMED = "FAILED: request {} is malformed, please contact det-support@xfel.eu"
     INSTRUMENT_NOT_FOUND = "FAILED: Instrument {} is not known!, please contact det-support@xfel.eu"
-
-
diff --git a/reportservice/report_conf.yaml b/reportservice/report_conf.yaml
index 4a6ade68ea54fa052ecd838da768b0b26d79734c..aa6a59202e7c7bde890d1bdb816e56fdffe7ad79 100644
--- a/reportservice/report_conf.yaml
+++ b/reportservice/report_conf.yaml
@@ -638,5 +638,3 @@ HED:
         out-folder: "/gpfs/exfel/data/scratch/xcal/report_service/tmp/{instrument}/{detector}/"
         cal-db-timeout: 180000
         cal-db-interface: "tcp://max-exfl016:8015#8025"
-
-
diff --git a/reportservice/report_service.py b/reportservice/report_service.py
index 03d24a9a6d0606920df0668ab4eaf8dae650e09d..4e6859f7e990a481f4a16b5dab10a06745de0b9f 100644
--- a/reportservice/report_service.py
+++ b/reportservice/report_service.py
@@ -293,7 +293,7 @@ async def server_runner(conf_file, mode):
                     try:
                         output = await asyncio.create_subprocess_shell(
                                  " ".join(run_base), stdout=PIPE, stderr=PIPE)
-                        
+
                         launched_jobs.append(output.communicate())
 
                         logging.info('Submission information: {}:'
@@ -402,7 +402,7 @@ async def server_runner(conf_file, mode):
             break
 
 arg_parser = argparse.ArgumentParser(description='Start the report service')
-arg_parser.add_argument('--config-file', type=str, 
+arg_parser.add_argument('--config-file', type=str,
                         default='./report_conf.yaml',
                         help='config file path with '
                              'reportservice port. '
diff --git a/setup.py b/setup.py
index 58b1c9ce10dbb518dd11c7af2bada148433de433..fc9ead08dfc7c9a90354c5bcebc689759ebd36ec 100644
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,7 @@ setup(
         'xfel_calibrate': ['bin/*.sh'] + data_files + ['titlepage.tmpl',
                                                        'xfel.pdf']
     },
-    
+
     cmdclass={
         'build' : PreInstallCommand,
         'install': PostInstallCommand,
@@ -81,10 +81,9 @@ setup(
     description='',
     entry_points = {
               'console_scripts': [
-                  'xfel-calibrate = xfel_calibrate.calibrate:run',                  
-              ],              
+                  'xfel-calibrate = xfel_calibrate.calibrate:run',
+              ],
           },
     ext_modules=extensions
-    
-)
 
+)
diff --git a/tests/legacy/correction_base.py b/tests/legacy/correction_base.py
index 86d32b605a94384aac40b45337e38e4482833aa0..831fa9478c9c62e49ae8b345201de3ded0ad1157 100644
--- a/tests/legacy/correction_base.py
+++ b/tests/legacy/correction_base.py
@@ -283,7 +283,7 @@ class CorrectionTestBase:
 
         print("Executing {}".format(" ".join(cmd)))
 
-        
+
         print("Creating data paths for artefacts")
         cls.artefact_dir = get_artefact_dir(cls)
         if not os.path.exists(cls.artefact_dir):
@@ -333,10 +333,10 @@ class CorrectionTestBase:
         """
         out_folder = self._output_to_path()
         files_to_check = glob.glob(
-            "{}/*{}".format(out_folder, self.rel_file_ext))  
-        
+            "{}/*{}".format(out_folder, self.rel_file_ext))
+
         for fname in files_to_check:
-            
+
             with h5py.File(fname, "r") as f:
                 d = {}
                 def visitor(k, item):
@@ -344,20 +344,20 @@ class CorrectionTestBase:
                         d[k] = item.fletcher32
 
                 f.visititems(visitor)
-                
+
                 chkfname = "{}.checksum".format(fname)
                 chkpath = "{}/{}".format(self.artefact_dir,
                                          os.path.basename(chkfname))
                 with open(chkpath, 'wb') as fc:
-                    pickle.dump(d, fc, pickle.HIGHEST_PROTOCOL) 
+                    pickle.dump(d, fc, pickle.HIGHEST_PROTOCOL)
 
     @unittest.skipIf(args.skip_checksum,
                      "User requested to skip checksum test")
     def test_checksums(self):
         """ Compare Fletcher32 checksums of notebook's output with artefacts
 
-        This test will verify if datasets with checksums are identical. 
-        Even for small changes in the correction logic this test is likely 
+        This test will verify that datasets with checksums are identical.
+        Even for small changes in the correction logic, this test is likely
         to fail.
         If this is the case, it is recommended to verify correctness using
         the other tests, which inspect data, and the create new checksums
@@ -379,12 +379,12 @@ class CorrectionTestBase:
                                 "No comparison checksums found")
             with open(chkpath, 'rb') as fc:
                 d = pickle.load(fc)
-                
+
                 with h5py.File(fname, "r") as f:
- 
+
                     def visitor(k, item):
                         if isinstance(item, h5py.Dataset):
-                            
+
                             msg = "Verify checksum of: {}".format(k)
                             with self.subTest(msg=msg):
                                 self.assertIn(k, d)
@@ -578,7 +578,7 @@ class CorrectionTestBase:
 
             _, last_train = rd.train_from_id(rd.train_ids[-1])
             test_train_info(last_train, "last_train")
-            
+
     @unittest.skipIf(args.skip_karabo_data,
                      "User requested to skip karabo data test")
     def test_karabo_data_self_test(self):
@@ -592,7 +592,7 @@ class CorrectionTestBase:
                      "User requested to skip report generation test")
     def test_report_gen(self):
         """ Verify expected reports are generated
-        
+
         Also verifies that no additional reports are present, and copies
         the report to the artefact dir.
         """
@@ -610,4 +610,3 @@ class CorrectionTestBase:
         pdfs = glob.glob("{}/*.pdf".format(out_folder))
         for pdf in pdfs:
             self.assertIn(os.path.basename(pdf), self.expected_reports)
-                
diff --git a/tests/legacy/readme.md b/tests/legacy/readme.md
index 060f9898283312292a14c51c6d0da0c14cc00ea5..f5d8619b309f4a65beea4f035aea94df8ee40d0c 100644
--- a/tests/legacy/readme.md
+++ b/tests/legacy/readme.md
@@ -4,4 +4,3 @@ They are broken and haven't been looked at. Some may be fixed, some are deprecat
 This directory is excluded from the CI runner.
 
 It does not mean that they can be freely deleted. Each test file should be assayed and fixed, if possible!
-
diff --git a/tests/test_calibrate.py b/tests/test_calibrate.py
index 6b3d50b54588bbf48805cfad5619e06c54c5a240..7cc48d61b9ea84a98aae7e34da80a246612af605 100644
--- a/tests/test_calibrate.py
+++ b/tests/test_calibrate.py
@@ -32,4 +32,4 @@ def test_balance_sequences():
                           run=9992, sequences=[1991, 2021],
                           sequences_per_node=1, karabo_da=-1,
                           max_nodes=3)
-        assert 'karabo_da as a string or list' in e.value()
\ No newline at end of file
+        assert 'karabo_da as a string or list' in str(e.value)
diff --git a/webservice/README.md b/webservice/README.md
index 501e7f9c522eee42904d37209b09d3b91235ec98..fef79ac81a6c2c76b30755fff4dd1d6a2bdfb485 100644
--- a/webservice/README.md
+++ b/webservice/README.md
@@ -2,7 +2,7 @@ Offline Calibration Webservice
 ==============================
 
 The offline calibration webservice interacts with the Metadata Catalogue (MDC),
-such that migration of data to the offline cluster automatically triggers 
+such that migration of data to the offline cluster automatically triggers
 calibration jobs on relevant files.
 
 Installation
@@ -18,7 +18,7 @@ The service needs to be installed under a functional user account which
 * has write permission to the *proc* folders for outputting corrected data
 * is allowed to launch SLURM jobs on the cluster
 
-The hosting system needs to be accessible via ZMQ calls from the MDC. 
+The hosting system needs to be accessible via ZMQ calls from the MDC.
 This requires appropriate DMZ settings. Additionally, it needs to be able
 to interact with the MDC via the MDC client interface
 
@@ -32,10 +32,10 @@ Additionally, the *xfel-calibrate* environment needs to be installed:
    ``` bash
    git clone https://git.xfel.eu/gitlab/detectors/pycalibration.git .
    ```
-   
+
 2. pick the python environment to install into. On Maxwell the anaconda/3
    environment will work:
-   
+
    ``` bash
    module load anaconda/3
    ```
@@ -48,7 +48,7 @@ Additionally, the *xfel-calibrate* environment needs to be installed:
 
 4. some correction notebooks require pyDetLib. It requires manual installation in
    a non-Karabo python environment
-   
+
    ``` bash
    mkdir pydetlib
    cd pydetlib
@@ -57,19 +57,19 @@ Additionally, the *xfel-calibrate* environment needs to be installed:
    pip install --user pycuda
    pip install --user ./lib/
    cd ..
-   
+
 5. install the separate requirements for the webservice:
 
    ``` bash
    cd webservice
    pip install --user -r requirements.txt
    ```
-   
+
 6. install the metadata_client library, according to instructions at
 
    https://git.xfel.eu/gitlab/ITDM/metadata_client
-   
-   
+
+
 You are now good to go.
 
 Configuration
@@ -84,7 +84,7 @@ In the **config-repo** section, the configuration repository needs to be configu
 config-repo:
     url: https://git.xfel.eu/gitlab/detectors/calibration_configurations.git
     local-path: /home/haufs/calibration_config/
-``` 
+```
 Here you should prepend the *url* entry with a gitlab access token, that provides access
 to the calibration_configurations repository.
 
@@ -108,9 +108,9 @@ In the **metadata-client** section, the client interface to the MDC is configure
 ``` YAML
 
 metadata-client:
-    user-id: 
-    user-secret: 
-    user-email: 
+    user-id:
+    user-secret:
+    user-email:
     metadata-web-app-url: 'https://in.xfel.eu/metadata'
     metadata-web-app-url: 'https://in.xfel.eu/metadata'
     token-url: 'https://in.xfel.eu/metadata/oauth/token'
@@ -153,5 +153,5 @@ Use
    ``` bash
    python webservice.py --help
    ```
-  
-to display a list of available options.
\ No newline at end of file
+
+to display a list of available options.
diff --git a/webservice/manual_launch.py b/webservice/manual_launch.py
index 2006d842941d4d3e326df627f2de614c5899cbff..4753bbdafc2797cfc259355c92b8c1bf9598acf8 100644
--- a/webservice/manual_launch.py
+++ b/webservice/manual_launch.py
@@ -24,5 +24,3 @@ stuff = [action, dark_run_id, sase, instrument, cycle, proposal, 'SPB_DET_AGIPD1
 socket.send(str(stuff).encode())
 resp = socket.recv_multipart()[0]
 print(resp.decode())
-
-
diff --git a/webservice/serve_overview.yaml b/webservice/serve_overview.yaml
index 14948daae12171a587e67b5643df5694393c5b95..dace52af08542e0bd6a1818bc5d24badba4aefef 100644
--- a/webservice/serve_overview.yaml
+++ b/webservice/serve_overview.yaml
@@ -19,9 +19,9 @@ shell-commands:
   cat-log: "cat web.log"
 
 run-candidates:
-    - "--run-high" 
+    - "--run-high"
     - "--run-med"
-    - "--run-low" 
+    - "--run-low"
     - "--run"
 
 server-config:
diff --git a/webservice/sqlite_view.py b/webservice/sqlite_view.py
index c1672721386a0a5024b12bc9f2a5fb3247b4d630..29670a19f3b02b710da98e713bf9f176e56809d4 100644
--- a/webservice/sqlite_view.py
+++ b/webservice/sqlite_view.py
@@ -24,4 +24,3 @@ for r in c.fetchall():
     rid, jobid, db_proposal, db_run, status, time, _, _ = r
     if db_proposal == proposal and db_run == run:
         print(r)
-        
diff --git a/webservice/templates/checkbox.html b/webservice/templates/checkbox.html
index 154128addd5ce626fed7589df1e37708469c963f..ea734cd7a2de793123a2251cff68a1989205425d 100644
--- a/webservice/templates/checkbox.html
+++ b/webservice/templates/checkbox.html
@@ -8,4 +8,4 @@
 {% for run_name in runs %}
     <label >{{run_name}}:</label>
     <input type="number" id="run{{loop.index}}" name="{{run_name}}" min="1" max="999999" size="4">
-{% endfor %}
\ No newline at end of file
+{% endfor %}
diff --git a/webservice/templates/log_output.html b/webservice/templates/log_output.html
index b5479747c5873a02673383c7c3c6be535b4eb3ec..8d70880e92045c981e7d8768a7bd21122a3b1f46 100644
--- a/webservice/templates/log_output.html
+++ b/webservice/templates/log_output.html
@@ -3,4 +3,4 @@
    <div class="log-out">
    {{ logout }}
    </div>
-</div>
\ No newline at end of file
+</div>
diff --git a/webservice/update_mdc.py b/webservice/update_mdc.py
index fd8d09e895490b5bbd418c9824c7e34f51285f7a..52750549997d7b45e938678fa45e68088bfff8be 100644
--- a/webservice/update_mdc.py
+++ b/webservice/update_mdc.py
@@ -39,4 +39,3 @@ if response.status_code == 200:
     print('Run is updated')
 else:
     print(f'Update failed {response}')
-
diff --git a/xfel_calibrate/calibrate.py b/xfel_calibrate/calibrate.py
index 249556be9e61526bede5057ed3b1c330c104ee18..2924c5eef5a0b384740182c212fb06d3de7e13f6 100755
--- a/xfel_calibrate/calibrate.py
+++ b/xfel_calibrate/calibrate.py
@@ -262,7 +262,7 @@ def balance_sequences(in_folder: str, run: int, sequences: List[int],
                       sequences_per_node: int, karabo_da: Union[list, str],
                       max_nodes: int = 8):
     """Return balance list of sequences to be executed on slurm nodes
-    Total list of sequences is splitted onto several nodes based on 
+    The total list of sequences is split onto several nodes based on
     sequences_per_node. If the number of the required nodes is more than
     the max_nodes, the number of sequences_per_node is adjusted.
 
@@ -303,7 +303,7 @@ def balance_sequences(in_folder: str, run: int, sequences: List[int],
         if len(seq_nums) == 0:
             raise ValueError(f"Selected sequences {sequences} are not "
                              f"available in {in_path}")
-    
+
     # Validate required nodes with max_nodes
     nsplits = len(seq_nums) // sequences_per_node
     if nsplits > max_nodes:
diff --git a/xfel_calibrate/notebooks.py b/xfel_calibrate/notebooks.py
index 5019736610c389a37ec238a1801cb83c62bb3a9b..c584b6657c00b97d3a34afdc6c7b94fb26ceddbf 100644
--- a/xfel_calibrate/notebooks.py
+++ b/xfel_calibrate/notebooks.py
@@ -45,7 +45,7 @@ notebooks = {
                             "cluster cores": 8},
         },
         "FF_HISTS": {
-            "notebook": 
+            "notebook":
                 "notebooks/AGIPD/AGIPD_FF_Histogramming.ipynb",
             "concurrency": {"parameter": "modules",
                             "default concurrency": list(range(16)),