diff --git a/notebooks/ePix100/Correction_ePix100_NBC.ipynb b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
index 30d0586b3039ddd3ddbefed3c7e6da98f1b27753..89fa139226537207b3b447893c3a26c57333ccb5 100644
--- a/notebooks/ePix100/Correction_ePix100_NBC.ipynb
+++ b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
@@ -24,15 +24,15 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/MID/202330/p900329/raw\" # input folder, required\n",
+    "in_folder = \"/gpfs/exfel/exp/HED/202102/p002739/raw\" # input folder, required\n",
     "out_folder = \"\"  # output folder, required\n",
     "metadata_folder = \"\"  # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
     "sequences = [-1]  # sequences to correct, set to -1 for all, range allowed\n",
     "sequences_per_node = 1  # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n",
-    "run = 106  # which run to read data from, required\n",
+    "run = 38  # which run to read data from, required\n",
     "\n",
     "# Parameters for accessing the raw data.\n",
-    "karabo_id = \"MID_EXP_EPIX-1\"  # karabo karabo_id\n",
+    "karabo_id = \"HED_IA1_EPX100-1\"  # karabo karabo_id\n",
     "karabo_da = \"EPIX01\"  # data aggregators\n",
     "db_module = \"\"  # module id in the database\n",
     "receiver_template = \"RECEIVER\"  # detector receiver template for accessing raw data files\n",
@@ -628,8 +628,11 @@
     "            \"data.image.pixels\", data=data, chunks=dataset_chunk)\n",
     "        outp_source.create_key(\n",
     "            \"data.trainId\", data=seq_dc.train_ids, chunks=min(50, len(seq_dc.train_ids)))\n",
-    "        outp_source.create_key(\n",
-    "            \"data.pulseId\", data=list(seq_dc[instrument_src]['data.pulseId'].ndarray().squeeze()), chunks=min(50, len(seq_dc.train_ids)))\n",
+    "        \n",
+    "        if np.isin('data.pulseId', list(seq_dc[instrument_src].keys())): # some runs are missing 'data.pulseId'\n",
+    "            outp_source.create_key(\n",
+    "                \"data.pulseId\", data=list(seq_dc[instrument_src]['data.pulseId'].ndarray().squeeze()), chunks=min(50, len(seq_dc.train_ids)))\n",
+    "        \n",
     "        if pattern_classification:\n",
     "            # Add main corrected `data.image.pixels` dataset and store corrected data.\n",
     "            outp_source.create_key(\n",