diff --git a/notebooks/ePix100/Correction_ePix100_NBC.ipynb b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
index 94cf8bcb1355d666287ea3bbcdbad2d3c8db63a3..03b9bbded634fbf2c8ff1f18914ac062c8cf07af 100644
--- a/notebooks/ePix100/Correction_ePix100_NBC.ipynb
+++ b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
@@ -71,6 +71,7 @@
     "split_evt_secondary_threshold = 5.  # secondary threshold for split event correction\n",
     "split_evt_mip_threshold = 1000.  # minimum ionizing particle threshold\n",
     "\n",
+    "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n",
     "    from xfel_calibrate.calibrate import balance_sequences as bs\n",
     "    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)"
@@ -573,12 +574,8 @@
     "\n",
     "        d_clu, patterns = patternClassifier.classify(d)\n",
     "        d_clu[d_clu < (split_evt_primary_threshold*const_data[\"Noise\"])] = 0\n",
-    "\n",
-    "        d_sing = np.zeros(d_clu.shape)\n",
-    "        d_sing[patterns==100] = d_clu[patterns==100] # pattern 100 corresponds to single photons events\n",
     "        \n",
     "        data_clu[index, ...] = np.squeeze(d_clu)\n",
-    "        data_sing[index, ...] = np.squeeze(d_sing)\n",
     "        data_patterns[index, ...] = np.squeeze(patterns)\n",
     "\n",
     "        histCalCSCor.fill(d_clu)\n",
@@ -595,17 +592,17 @@
     "        if pattern_classification:\n",
     "            # Modify pattern classification.\n",
     "            d_clu = d_clu * gain_cnst\n",
-    "            d_sing = d_sing * gain_cnst\n",
     "            \n",
     "            if photon_energy > 0:\n",
     "                d_clu /= photon_energy\n",
-    "                d_sing /= photon_energy\n",
     "\n",
     "            data_clu[index, ...] = np.squeeze(d_clu)\n",
-    "            data_sing[index, ...] = np.squeeze(d_sing)\n",
-    "            \n",
+    "\n",
     "            histCalGainCorClusters.fill(d_clu)\n",
-    "            histCalGainCorSingles.fill(d_sing)\n",
+    "            \n",
+    "            d_sing = d_clu[patterns==100] # pattern 100 corresponds to single photons events\n",
+    "            if len(d_sing):\n",
+    "                histCalGainCorSingles.fill(d_sing)\n",
     "\n",
     "    data[index, ...] = np.squeeze(d)\n",
     "    histCalCor.fill(d)"
@@ -652,7 +649,6 @@
     "\n",
     "    if pattern_classification:\n",
     "        data_clu = context.alloc(shape=dshape, dtype=np.float32)\n",
-    "        data_sing = context.alloc(shape=dshape, dtype=np.float32)\n",
     "        data_patterns = context.alloc(shape=dshape, dtype=np.int32)\n",
     "\n",
     "    step_timer.start()\n",