diff --git a/bin/slurm_calibrate.sh b/bin/slurm_calibrate.sh
index 700f21c8939dfae81f99da12df718c56726ccbc1..2d8dfccbf3a596ab23c8fa329b14c05bd224f88f 100755
--- a/bin/slurm_calibrate.sh
+++ b/bin/slurm_calibrate.sh
@@ -21,7 +21,7 @@ echo "notebook: $notebook"
 echo "detector: $detector"
 echo "caltype: $caltype"
 echo "cluster_cores: $cluster_cores"
-echo "job ID: $SLURM_JOB_ID"
+echo "job ID: ${SLURM_JOB_ID:-none}"
 
 export CAL_NOTEBOOK_NAME="$notebook"
 
diff --git a/bin/slurm_finalize.sh b/bin/slurm_finalize.sh
old mode 100644
new mode 100755
index c0f632dd8c7603ad5c87d1cf04f4e1dc469601b2..9eaf9a73581d5861f3fba2005324de0542e1a4f0
--- a/bin/slurm_finalize.sh
+++ b/bin/slurm_finalize.sh
@@ -11,7 +11,7 @@ echo "Running with the following parameters:"
 echo "Python path: $python_path"
 echo "Correction temp dir: $temp_dir"
 echo "finalize script: $finalize_script"
-echo "job ID: $SLURM_JOB_ID"
+echo "job ID: ${SLURM_JOB_ID:-none}"
 
 # set-up enviroment
 source /etc/profile.d/modules.sh
diff --git a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
index 9d09b56d44110704d3a9f84a058c04705392738e..5ce10fa6eb7bede821ba34db28d321cd4fee3cac 100644
--- a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
+++ b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_NBC.ipynb
@@ -92,6 +92,7 @@
     "import XFELDetAna.xfelpyanatools as xana\n",
     "from cal_tools.agipdlib import get_bias_voltage\n",
     "from cal_tools.agipdutils_ff import (\n",
+    "    BadPixelsFF,\n",
     "    any_in,\n",
     "    fit_n_peaks,\n",
     "    gaussian,\n",
@@ -101,7 +102,6 @@
     "    set_par_limits,\n",
     ")\n",
     "from cal_tools.ana_tools import get_range, save_dict_to_hdf5\n",
-    "from cal_tools.enums import BadPixelsFF\n",
     "from iminuit import Minuit\n",
     "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
     "from XFELDetAna.plotting.simpleplot import simplePlot\n",
diff --git a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb
index 1754a260c6e47b86611b8b1f5d6ec2a759b1df6c..021e16ec615b20c9ecf79b9570d87bcd0387f2c0 100644
--- a/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb
+++ b/notebooks/AGIPD/Characterize_AGIPD_Gain_FlatFields_Summary.ipynb
@@ -88,13 +88,14 @@
     "    get_num_cells,\n",
     ")\n",
     "from cal_tools.agipdutils_ff import (\n",
+    "    BadPixelsFF,\n",
     "    any_in,\n",
     "    fit_n_peaks,\n",
     "    gaussian_sum,\n",
     "    get_starting_parameters,\n",
     ")\n",
     "from cal_tools.ana_tools import get_range, save_dict_to_hdf5\n",
-    "from cal_tools.enums import BadPixels, BadPixelsFF\n",
+    "from cal_tools.enums import BadPixels\n",
     "from cal_tools.tools import (\n",
     "    get_dir_creation_date,\n",
     "    get_pdu_from_db,\n",
diff --git a/src/cal_tools/agipdutils.py b/src/cal_tools/agipdutils.py
index 280f07324f8076855a5e3b76eb59a38f2c5c9ab7..a5d7cb628c3557aa75d6352157fb5592e6a0d9ae 100644
--- a/src/cal_tools/agipdutils.py
+++ b/src/cal_tools/agipdutils.py
@@ -135,6 +135,16 @@ def baseline_correct_via_stripe(d, g, m, frac_high_med):
     :param m: bad pixel mask
     """
 
+    # Restrict the mask to the bad pixels obtained from darks.
+    # This has been introduced because the BadPixelsFF constants can add
+    # additional masking that causes the baseline correction to fail via
+    # the abort condition len(idx) < 3 below.
+    # (see calibration/planning#96)
+    m = m & (BadPixels.OFFSET_OUT_OF_THRESHOLD |
+             BadPixels.NOISE_OUT_OF_THRESHOLD |
+             BadPixels.OFFSET_NOISE_EVAL_ERROR |
+             BadPixels.NO_DARK_DATA)
+
     dd = copy.copy(d)
     dd[g != 0] = np.nan  # only high gain data
     dd[m != 0] = np.nan  # only good pixels
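
A minimal standalone sketch of what the mask restriction above does; the flag values are made up and BP merely stands in for cal_tools.enums.BadPixels. AND-ing with the dark-derived bits clears any FF-derived flags, so those pixels still contribute to the stripe-based baseline estimate:

    import numpy as np
    from enum import IntFlag

    class BP(IntFlag):                        # stand-in for cal_tools.enums.BadPixels
        OFFSET_OUT_OF_THRESHOLD = 1 << 0      # dark-derived
        NOISE_OUT_OF_THRESHOLD  = 1 << 1      # dark-derived
        FF_GAIN_DEVIATION       = 1 << 10     # hypothetical FF-derived bit

    dark_bits = BP.OFFSET_OUT_OF_THRESHOLD | BP.NOISE_OUT_OF_THRESHOLD

    m = np.array([BP.OFFSET_OUT_OF_THRESHOLD,   # remains masked
                  BP.FF_GAIN_DEVIATION,         # unmasked for this correction only
                  0],                           # good pixel, remains good
                 dtype=np.uint32)

    print(m & dark_bits)                        # [1 0 0]
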
diff --git a/src/cal_tools/agipdutils_ff.py b/src/cal_tools/agipdutils_ff.py
index ff57587d7f55c60dd59710fe78a156d6507343c3..7295e029e0dad75f16e9c73a1a177a0de7a2cfe5 100644
--- a/src/cal_tools/agipdutils_ff.py
+++ b/src/cal_tools/agipdutils_ff.py
@@ -1,9 +1,27 @@
+from enum import IntFlag
 from typing import Any, Dict, List, Optional, Tuple
 
 import numpy as np
 from iminuit import Minuit
 
-from cal_tools.enums import BadPixelsFF
+
+class BadPixelsFF(IntFlag):
+    """ The SlopesFF Bad Pixel Encoding
+
+    This is only used internally in the AGIPD FF notebooks and must
+    always be converted to the corresponding BadPixels values before
+    saving to file.
+    """
+
+    FIT_FAILED               = 0b000000000000000000001 # bit 1
+    CHI2_THRESHOLD           = 0b000000000000000000010 # bit 2
+    NOISE_PEAK_THRESHOLD     = 0b000000000000000000100 # bit 3
+    GAIN_THRESHOLD           = 0b000000000000000001000 # bit 4
+    PEAK_WIDTH_THRESHOLD     = 0b000000000000000010000 # bit 5
+    ACCURATE_COVAR           = 0b000000000000000100000 # bit 6
+    BAD_DARK                 = 0b000000000000001000000 # bit 7
+    NO_ENTRY                 = 0b000000000000010000000 # bit 8
+    GAIN_DEVIATION           = 0b000000000000100000000 # bit 9
 
 
 def any_in(mask: np.ndarray, bits: int) -> bool:
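
The docstring above requires converting these internal flags to BadPixels values before saving. A standalone sketch of one such translation; BadPixelsFF is abridged and FF_EVAL_ERROR_BIT is a made-up placeholder, not the mapping the notebooks actually use:

    import numpy as np
    from enum import IntFlag

    class BadPixelsFF(IntFlag):               # abridged copy of the class above
        FIT_FAILED = 1 << 0
        NO_ENTRY   = 1 << 7

    FF_EVAL_ERROR_BIT = 1 << 15               # stand-in for a BadPixels member

    ff_mask = np.array([BadPixelsFF.FIT_FAILED, 0, BadPixelsFF.NO_ENTRY],
                       dtype=np.uint32)

    # Collapse any FF failure onto a single BadPixels bit before writing to file.
    out_mask = np.zeros_like(ff_mask)
    failed = (ff_mask & (BadPixelsFF.FIT_FAILED | BadPixelsFF.NO_ENTRY)) != 0
    out_mask[failed] |= FF_EVAL_ERROR_BIT
    print(out_mask)                           # [32768     0 32768]
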
diff --git a/src/cal_tools/enums.py b/src/cal_tools/enums.py
index 21d4b4f08eee3d8c95c2fbb09fe42dc177d1bff4..e02d59994925aa5530d265ff944839fb160c9613 100644
--- a/src/cal_tools/enums.py
+++ b/src/cal_tools/enums.py
@@ -28,21 +28,6 @@ class BadPixels(IntFlag):
     NON_LIN_RESPONSE_REGION  = 0b100000000000000000000 # bit 21
 
 
-class BadPixelsFF(IntFlag):
-    """ The SLopesFF Bad Pixel Encoding
-    """
-
-    FIT_FAILED               = 0b000000000000000000001 # bit 1
-    CHI2_THRESHOLD           = 0b000000000000000000010 # bit 2
-    NOISE_PEAK_THRESHOLD     = 0b000000000000000000100 # bit 3
-    GAIN_THRESHOLD           = 0b000000000000000001000 # bit 4
-    PEAK_WIDTH_THRESHOLD     = 0b000000000000000010000 # bit 5
-    ACCURATE_COVAR           = 0b000000000000000100000 # bit 6
-    BAD_DARK                 = 0b000000000000001000000 # bit 6
-    NO_ENTRY                 = 0b000000000000010000000 # bit 7
-    GAIN_DEVIATION           = 0b000000000000100000000 # bit 8
-
-
 class SnowResolution(Enum):
     """ An Enum specifying how to resolve snowy pixels
     """
diff --git a/src/xfel_calibrate/calibrate.py b/src/xfel_calibrate/calibrate.py
index 2dc6ffb2404b6c2fd662dcc1f45e6f77eaa328db..e83c60a41de3796751e0b7fd7873cfd650f1468e 100755
--- a/src/xfel_calibrate/calibrate.py
+++ b/src/xfel_calibrate/calibrate.py
@@ -644,7 +644,7 @@ def run_finalize(fmt_args, temp_path, job_list, sequential=False):
         finalize_script,
     ]
 
-    output = check_output(cmd).decode('utf8')
+    output = check_output(cmd, input=b'').decode('utf8')
     jobid = None
     if not sequential:
         jobid = output.partition(';')[0].strip()
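
The input=b'' added above hands the child an empty, immediately closed stdin, so a finalize command that tries to read stdin gets EOF instead of blocking on the caller's terminal. A standalone illustration (assumes a POSIX cat; not pycalibration code):

    from subprocess import check_output

    # With an empty stdin pipe the child sees EOF at once and returns b''.
    print(check_output(["cat"], input=b""))

    # check_output(["cat"]) without input= would inherit this process's stdin
    # and, on a terminal, sit there waiting for EOF.
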
@@ -778,7 +778,7 @@ def prepare_job(
     nbformat.write(new_nb, nbpath)
 
     return JobArgs([
-        "pycalib-run-nb.sh",
+        "./pycalib-run-nb.sh",  # ./ makes the script runnable from the working directory without needing PATH
         new_name,
         "{python}",
         cluster_profile,
diff --git a/src/xfel_calibrate/finalize.py b/src/xfel_calibrate/finalize.py
index 30d3b8b9ff55edeedfaf13eb540a5660ffc42809..3683063011ea05753aad60c7cd893c969c502831 100644
--- a/src/xfel_calibrate/finalize.py
+++ b/src/xfel_calibrate/finalize.py
@@ -141,6 +141,8 @@ def get_job_info(jobs: List[str], fmt: List[str]) -> List[List[str]]:
     Result ordered according to order of jobs given
     Order of fields in inner lists follows fmt
     """
+    if not jobs:
+        return []  # Skip calling sacct if not using Slurm
 
     # will use JobID to match results to jobs (duplicate field in fmt is OK)
     fmt_query = ",".join(["JobID"] + fmt)
@@ -180,6 +182,9 @@ def make_timing_summary(run_path: Path, job_times: List[List[str]],
                         {{ line }}
                         {%- endfor %}
 
+                    ''')
+
+    job_tbl_tmpl = Template('''
                     .. math::
                         {% for line in job_table %}
                         {{ line }}
@@ -194,13 +199,15 @@ def make_timing_summary(run_path: Path, job_times: List[List[str]],
     ]
 
     with (run_path / "timing_summary.rst").open("w+") as fd:
-        job_table = tabulate.tabulate(job_times, tablefmt="latex",
-                                      headers=job_time_fmt)
         time_table = tabulate.tabulate(time_vals, tablefmt="latex",
                                        headers=["Processing step", "Timestamp"])
 
-        fd.write(dedent(tmpl.render(job_table=job_table.split("\n"),
-                                    time_table=time_table.split("\n"))))
+        fd.write(dedent(tmpl.render(time_table=time_table.split("\n"))))
+
+        if job_times:
+            job_table = tabulate.tabulate(job_times, tablefmt="latex",
+                                          headers=job_time_fmt)
+            fd.write(dedent(job_tbl_tmpl.render(job_table=job_table.split("\n"))))
 
 
 def make_report(run_path: Path, tmp_path: Path, project: str,
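
With the split above, the job table is only rendered when Slurm timings exist. A condensed standalone sketch of that flow, assuming the Template used in finalize.py is jinja2's (as the {% for %} syntax suggests):

    import tabulate
    from jinja2 import Template

    job_tbl_tmpl = Template(
        ".. math::\n"
        "    {% for line in job_table %}\n"
        "    {{ line }}\n"
        "    {%- endfor %}\n")

    rst = "(time table rendered here, always)\n"
    job_times = []                       # e.g. the jobs ran without Slurm
    if job_times:                        # job table appended only when non-empty
        table = tabulate.tabulate(job_times, tablefmt="latex")
        rst += job_tbl_tmpl.render(job_table=table.split("\n"))
    print(rst)
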
@@ -389,17 +396,6 @@ def tex_escape(text):
 def finalize(joblist, finaljob, run_path, out_path, version, report_to, data_path='Unknown',
              request_time='', submission_time=''):
     run_path = Path(run_path)
-    print("Waiting on jobs to finish: {}".format(joblist))
-    while True:
-        found_jobs = set()
-        output = check_output(['squeue']).decode('utf8')
-        for line in output.split("\n"):
-            for job in joblist:
-                if str(job) in line:
-                    found_jobs.add(job)
-        if not found_jobs:
-            break
-        sleep(10)
 
     prepare_plots(run_path)