diff --git a/cal_tools/cal_tools/agipdlib.py b/cal_tools/cal_tools/agipdlib.py
index 9ee105b9993396873817c928bfa5ef12ee154f7a..7509813830d60edd8e646fac6634ae04b09c02e8 100644
--- a/cal_tools/cal_tools/agipdlib.py
+++ b/cal_tools/cal_tools/agipdlib.py
@@ -251,6 +251,10 @@ class AgipdCorrections:
                          self.corr_bools.get('blc_stripes'),
                          self.corr_bools.get('melt_snow')]
 
+        self.blc_bools = [self.corr_bools.get('blc_noise'),
+                          self.corr_bools.get('blc_hmatch'),
+                          self.corr_bools.get('blc_stripes')]
+
     def read_file(self, i_proc, file_name):
         """
         Read file with raw data to shared memory
@@ -433,39 +437,37 @@ class AgipdCorrections:
         n_img = self.shared_dict[i_proc]['nImg'][0]
         data = self.shared_dict[i_proc]['data'][:n_img]
         cellid = self.shared_dict[i_proc]['cellId'][:n_img]
-        mask = self.mask[module_idx]  # shape of n_cells, x, y
+        mask_std = self.mask[module_idx]  # shape of n_cells, x, y
 
         for c in cells:
             std = np.nanstd(data[cellid == c, ...], axis=0)
-            mask[:, c, std == 0] |= BadPixels.DATA_STD_IS_ZERO.value
+            mask_std[:, c, std == 0] |= BadPixels.DATA_STD_IS_ZERO.value
 
-    def correct_agipd(self, i_proc, first, last):
+    def offset_correction(self, i_proc: int, first: int, last: int):
         """
-        Perform image-wise correction of data in shared memory
+        Perform image-wise offset correction for data in shared memory
 
         :param first: Index of the first image to be corrected
         :param last: Index of the last image to be corrected
         :param i_proc: Index of shared memory array to process
         """
-
         module_idx = self.shared_dict[i_proc]['moduleIdx'][0]
         data = self.shared_dict[i_proc]['data'][first:last]
-        bl_shift = self.shared_dict[i_proc]['blShift'][first:last]
         rawgain = self.shared_dict[i_proc]['rawgain'][first:last]
         gain = self.shared_dict[i_proc]['gain'][first:last]
         cellid = self.shared_dict[i_proc]['cellId'][first:last]
-        mask = self.shared_dict[i_proc]['mask'][first:last]
 
         # first evaluate the gain into 0, 1, 2 --> high, medium, low
         t0 = self.thresholds[module_idx][0]
         t1 = self.thresholds[module_idx][1]
 
-        rgain = None
-        raw_data = None
+        # load raw_data and t0_rgain to be used during gain_correction,
+        # if requested
         if self.corr_bools.get('melt_snow'):
-            rgain = rawgain / t0[cellid, ...]
-            raw_data = np.copy(data)
-
+            self.shared_dict[i_proc]['t0_rgain'][first:last] = \
+                rawgain / t0[cellid, ...]
+            self.shared_dict[i_proc]['raw_data'][first:last] = np.copy(data)
+
         # Often most pixels are in high-gain, so it's more efficient to
         # set the whole output block to zero than select the right pixels.
         gain[:] = 0
@@ -489,79 +491,121 @@ class AgipdCorrections:
         off = calgs.gain_choose(gain, offsetb)
         del offsetb
 
-        tmask = self.mask[module_idx][:, cellid]
-        msk = calgs.gain_choose_int(gain, tmask)
-        del tmask
-
-        # same for relative gain and then bad pixel mask
-        if hasattr(self, "rel_gain"):
-            # get the correct rel_gain depending on cell-id
-            rc = self.rel_gain[module_idx][:, cellid]
-            rel_cor = calgs.gain_choose(gain, rc)
-            del rc
-
         # subtract offset
         data -= off
         del off
 
+    def baseline_correction(self, i_proc:int, first:int, last:int):
+        """
+        Perform image-wise base-line shift correction for
+        data in shared memory via histogram or stripe
+
+        :param first: Index of the first image to be corrected
+        :param last: Index of the last image to be corrected
+        :param i_proc: Index of shared memory array to process
+        """
+
         # before doing relative gain correction we need to evaluate any
         # baseline shifts
+        # as they are effectively an additional offset in the data
-        if (self.corr_bools.get('blc_noise') or
-                self.corr_bools.get('blc_hmatch') or
-                self.corr_bools.get('blc_stripes')):
-
-            # do this image wise, as the shift is per image
-            for i in range(data.shape[0]):
-
-                # first correction requested may be to evaluate shift via
-                # noise peak
-                if self.corr_bools.get('blc_noise'):
-                    mn_noise = np.nanmean(self.noise[module_idx][0, cellid[i]])  #noqa
-                    dd, sh = baseline_correct_via_noise(data[i],
-                                                        mn_noise,
-                                                        gain[i],
-                                                        self.baseline_corr_noise_threshold)  #noqa
-                # if not we continue with initial data
-                else:
-                    dd = data[i]
-                    sh = 0
-
-                # if we have enough pixels in medium or low gain and
-                # correction via hist matching is requested to this now
-                gcrit = np.count_nonzero(gain[i] > 0) > 1000
-                if (gcrit and self.corr_bools.get('blc_hmatch') and
-                        hasattr(self, "rel_gain")):
-                    dd2, sh2 = correct_baseline_via_hist(data[i],
-                                                         rel_cor[i],
-                                                         gain[i])
-                    data[i] = np.maximum(dd, dd2)
-                    sh = np.minimum(sh, sh2)
-                    # finally correct diagonal effects if requested
-                    if self.corr_bools.get('corr_asic_diag'):
-                        ii = data[i, ...]
-                        gg = gain[i, ...]
-                        adim = correct_baseline_via_hist_asic(ii, gg)
-                        data[i, ...] = adim
-                # if there is not enough medium or low gain data to do an
-                # evaluation, do nothing
-                else:
-                    data[i, ...] = dd
-                    bl_shift[i] = sh
-
-                if self.corr_bools.get('blc_stripes'):
-                    fmh = self.frac_high_med[module_idx][cellid[i]]
-                    dd, sh = baseline_correct_via_stripe(data[i, ...],
-                                                         gain[i, ...],
-                                                         msk[i, ...],
-                                                         fmh)
-                    data[i, ...] = dd
-                    bl_shift[i] = sh
+        if not any(self.blc_bools):
+            return
+
+        module_idx = self.shared_dict[i_proc]['moduleIdx'][0]
+        data = self.shared_dict[i_proc]['data'][first:last]
+        bl_shift = self.shared_dict[i_proc]['blShift'][first:last]
+        gain = self.shared_dict[i_proc]['gain'][first:last]
+        cellid = self.shared_dict[i_proc]['cellId'][first:last]
+        # output is saved in sharedmem to pass to gain_correction()
+        # as this function takes about 3 seconds.
+        self.shared_dict[i_proc]['msk'][first:last] = \
+                            calgs.gain_choose_int(gain,
+                                                  self.mask[module_idx][:, cellid])  # noqa
+
+        if hasattr(self, "rel_gain"):
+            # Get the correct rel_gain depending on cell-id
+            self.shared_dict[i_proc]['rel_corr'][first:last] = \
+                                calgs.gain_choose(gain,
+                                                  self.rel_gain[module_idx][:, cellid])  # noqa
+
+        # do this image wise, as the shift is per image
+        for i in range(data.shape[0]):
+
+            # first correction requested may be to evaluate shift via
+            # noise peak
+            if self.corr_bools.get('blc_noise'):
+                mn_noise = np.nanmean(self.noise[module_idx][0, cellid[i]])
+                dd, sh = baseline_correct_via_noise(data[i], mn_noise,
+                                                    gain[i],
+                                                    self.baseline_corr_noise_threshold)  # noqa
+            # if not we continue with initial data
+            else:
+                dd = data[i]
+                sh = 0
+
+            # if we have enough pixels in medium or low gain and
+            # correction via hist matching is requested, do this now
+            gcrit = np.count_nonzero(gain[i] > 0) > 1000
+            if (gcrit and self.corr_bools.get('blc_hmatch') and
+                    hasattr(self, "rel_gain")):
+                dd2, sh2 = correct_baseline_via_hist(data[i],
+                                                     self.shared_dict[i_proc]['rel_corr'][first:last][i],  # noqa
+                                                     gain[i])
+                data[i] = np.maximum(dd, dd2)
+                sh = np.minimum(sh, sh2)
+                # finally correct diagonal effects if requested
+                if self.corr_bools.get('corr_asic_diag'):
+                    ii = data[i, ...]
+                    gg = gain[i, ...]
+                    adim = correct_baseline_via_hist_asic(ii, gg)
+                    data[i, ...] = adim
+            # if there is not enough medium or low gain data to do an
+            # evaluation, do nothing
+            else:
+                data[i, ...] = dd
+                bl_shift[i] = sh
+
+            if self.corr_bools.get('blc_stripes'):
+                fmh = self.frac_high_med[module_idx][cellid[i]]
+                dd, sh = baseline_correct_via_stripe(data[i, ...],
+                                                     gain[i, ...],
+                                                     self.shared_dict[i_proc]['msk'][first:last][i, ...],  # noqa
+                                                     fmh)
+                data[i, ...] = dd
+                bl_shift[i] = sh
+
+    def gain_correction(self, i_proc: int, first: int, last: int):
+        """
+        Perform several image-wise corrections for data in shared memory
+        e.g. Relative gain, FlatField xray correction, .....
+
+        :param first: Index of the first image to be corrected
+        :param last: Index of the last image to be corrected
+        :param i_proc: Index of shared memory array to process
+        """
+        module_idx = self.shared_dict[i_proc]['moduleIdx'][0]
+        data = self.shared_dict[i_proc]['data'][first:last]
+        gain = self.shared_dict[i_proc]['gain'][first:last]
+        cellid = self.shared_dict[i_proc]['cellId'][first:last]
+        mask = self.shared_dict[i_proc]['mask'][first:last]
+        rel_corr = self.shared_dict[i_proc]['rel_corr'][first:last]
+        msk = self.shared_dict[i_proc]['msk'][first:last]
+        # if baseline correction was not requested
+        # msk and rel_corr will still be empty shared_mem arrays
+        if not any(self.blc_bools):
+            msk = calgs.gain_choose_int(gain,
+                                        self.mask[module_idx][:, cellid])
+
+            # same for relative gain and then bad pixel mask
+            if hasattr(self, "rel_gain"):
+                # Get the correct rel_gain depending on cell-id
+                rel_corr = calgs.gain_choose(gain,
+                                             self.rel_gain[module_idx][:, cellid])  # noqa
 
         # Correct for relative gain
         if self.corr_bools.get("rel_gain") and hasattr(self, "rel_gain"):
-            data *= rel_cor
-            del rel_cor
+            data *= rel_corr
+            del rel_corr
 
         # Adjust medium gain baseline to match highest high gain value
         if self.corr_bools.get("adjust_mg_baseline"):
@@ -582,11 +626,13 @@ class AgipdCorrections:
         if self.corr_bools.get("xray_corr"):
             data /= self.xray_cor[module_idx]
 
+        # use sharedmem raw_data and t0_rgain
+        # after calculating it while offset correcting.
         if self.corr_bools.get('melt_snow'):
-            melt_snowy_pixels(raw_data, data, gain, rgain,
-                              self.snow_resolution)
-            del raw_data
-            del rgain
+            _ = melt_snowy_pixels(self.shared_dict[i_proc]['raw_data'][first:last],  # noqa
+                                        data, gain,
+                                        self.shared_dict[i_proc]['t0_rgain'][first:last],  # noqa
+                                        self.snow_resolution)
 
         # Inner ASIC borders are matched to the same signal level
         if self.corr_bools.get("match_asics"):
@@ -608,9 +654,13 @@ class AgipdCorrections:
             del bidx
 
         # Mask entire ADC if they are noise above a threshold
+        # TODO: Needs clarification if needed,
+        # the returned arg is not used.
         if self.corr_bools.get("mask_noisy_adc"):
-            make_noisy_adc_mask(msk, self.noisy_adc_threshold)
+            _ = make_noisy_adc_mask(msk,
+                                    self.noisy_adc_threshold)
 
+        # Copy the data across into the existing shared-memory array
         mask[...] = msk[...]
 
     def get_valid_image_idx(self, idx_base, infile, index_v=2):
@@ -935,7 +985,8 @@ class AgipdCorrections:
 
         # add additional bad pixel information
         if any(self.pc_bools):
-            bppc = np.moveaxis(cons_data["BadPixelsPC"].astype(np.uint32), 0, 2)  # noqa
+            bppc = np.moveaxis(cons_data["BadPixelsPC"].astype(np.uint32),
+                               0, 2)
             bpixels |= bppc[..., :bpixels.shape[2], None]
 
             slopesPC = cons_data["SlopesPC"].astype(np.float32)
@@ -1145,7 +1196,8 @@ class AgipdCorrections:
 
     def allocate_images(self, shape, n_cores_files):
         """
-        Allocate memory for image data
+        Allocate memory for image data and variables shared
+        between correction functions
 
         :param shape: Shape of expected data (nImg, x, y)
         :param n_cores_files: Number of files, handled in parallel
@@ -1164,7 +1216,16 @@ class AgipdCorrections:
             self.shared_dict[i]['nImg'] = sharedmem.empty(1, dtype='i4')
             self.shared_dict[i]['mask'] = sharedmem.empty(shape, dtype='u4')
             self.shared_dict[i]['data'] = sharedmem.empty(shape, dtype='f4')
-            self.shared_dict[i]['rawgain'] = sharedmem.empty(shape, dtype='u2')
+            self.shared_dict[i]['rawgain'] = sharedmem.empty(shape,
+                                                             dtype='u2')
             self.shared_dict[i]['gain'] = sharedmem.empty(shape, dtype='u1')
             self.shared_dict[i]['blShift'] = sharedmem.empty(shape[0],
                                                              dtype='f4')
+            # Parameters shared between image-wise correction functions
+            self.shared_dict[i]['msk'] = sharedmem.empty(shape, dtype='i4')
+            self.shared_dict[i]['raw_data'] = sharedmem.empty(shape,
+                                                              dtype='f4')
+            self.shared_dict[i]['rel_corr'] = sharedmem.empty(shape,
+                                                              dtype='f4')
+            self.shared_dict[i]['t0_rgain'] = sharedmem.empty(shape,
+                                                              dtype='u2')
diff --git a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
index e8e5e52265eca66111d76c8bd63109f0a2bc1c5e..2990da965082f29c46a4233c9febd3f9880755d8 100644
--- a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
+++ b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
@@ -358,9 +358,9 @@
     "        try:\n",
     "            gain_setting = get_gain_setting(control_fname, h5path_ctrl)\n",
     "        except Exception as e:\n",
-    "            print(f'Error while reading gain setting from: \\n{control_fname}')\n",
+    "            print(f'ERROR: while reading gain setting from: \\n{control_fname}')\n",
     "            print(e)\n",
-    "            print(\"Set gain settion to 0\")\n",
+    "            print(\"Set gain setting to 0\")\n",
     "            gain_setting = 0\n",
     "            "
    ]
@@ -516,24 +516,34 @@
     "        step_timer.start()\n",
     "        \n",
     "        img_counts = pool.starmap(agipd_corr.read_file, enumerate(file_batch))\n",
-    "        step_timer.done_step('Load')\n",
+    "        step_timer.done_step('Loading data from files')\n",
     "        \n",
     "        # Evaluate zero-data-std mask\n",
     "        pool.starmap(agipd_corr.mask_zero_std, itertools.product(\n",
     "            range(len(file_batch)), np.array_split(np.arange(agipd_corr.max_cells), n_cores_correct)\n",
     "        ))\n",
     "        step_timer.done_step('Mask 0 std')\n",
+    "\n",
+    "        # Perform offset image-wise correction\n",
+    "        pool.starmap(agipd_corr.offset_correction, imagewise_chunks(img_counts))\n",
+    "        step_timer.done_step(\"Offset correction\")\n",
     "        \n",
-    "        # Perform image-wise correction\n",
-    "        pool.starmap(agipd_corr.correct_agipd, imagewise_chunks(img_counts))\n",
-    "        step_timer.done_step(\"Image-wise correction\")\n",
     "        \n",
-    "        # Perform cross-file correction parallel over asics\n",
-    "        pool.starmap(agipd_corr.cm_correction, itertools.product(\n",
-    "            range(len(file_batch)), range(16)  # 16 ASICs per module\n",
-    "        ))\n",
-    "        step_timer.done_step(\"Common-mode correction\")\n",
+    "        if blc_noise or blc_stripes or blc_hmatch:\n",
+    "            # Perform image-wise correction\n",
+    "            pool.starmap(agipd_corr.baseline_correction, imagewise_chunks(img_counts))\n",
+    "            step_timer.done_step(\"Base-line shift correction\")\n",
     "        \n",
+    "        if common_mode:\n",
+    "            # Perform cross-file correction parallel over asics\n",
+    "            pool.starmap(agipd_corr.cm_correction, itertools.product(\n",
+    "                range(len(file_batch)), range(16)  # 16 ASICs per module\n",
+    "            ))\n",
+    "            step_timer.done_step(\"Common-mode correction\")\n",
+    "        \n",
+    "        # Perform image-wise correction\n",
+    "        pool.starmap(agipd_corr.gain_correction, imagewise_chunks(img_counts))\n",
+    "        step_timer.done_step(\"Image-wise correction\")\n",
     "        \n",
     "        # Save corrected data\n",
     "        pool.starmap(agipd_corr.write_file, [\n",
diff --git a/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb b/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
index 87bb034efbd1cb504fb6cf6a4b407dd38a168d2d..e19a03b99b81b8394b893ed27e44288fabe9e932 100644
--- a/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
+++ b/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
@@ -167,8 +167,11 @@
     "        try:\n",
     "            gain_setting = get_gain_setting(control_fname, h5path_ctrl)\n",
     "        except Exception as e:\n",
-    "            print(f'Error while reading gain setting: {e}\\n')\n",
-    "            \n",
+    "            print(f'ERROR: while reading gain setting from: \\n{control_fname}')\n",
+    "            print(e)\n",
+    "            print(\"Set gain setting to 0\")\n",
+    "            gain_setting = 0\n",
+    "\n",
     "print(f\"Gain setting: {gain_setting}\")\n",
     "print(f\"Detector in use is {karabo_id}\")\n",
     "\n",
diff --git a/requirements.txt b/requirements.txt
index 039431d2c1091b5c0f5908c09601473bdebc43c3..7b0af0733e86e8c349a36a14fe291d1a209b482a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -26,6 +26,7 @@ metadata_client == 3.0.5
 nbclient == 0.5.1
 nbconvert == 5.6.1
 nbformat == 5.0.7
+notebook == 6.1.5
 numpy == 1.19.1
 prettytable == 0.7.2
 princess == 0.2