diff --git a/notebooks/REMI/REMI_Digitize_and_Transform.ipynb b/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
index 76e784362825db4e7ad53a34efe1726935ce3ddb..e17775c7101b04496092c7024e3cd40509c59a4b 100644
--- a/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
+++ b/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
@@ -46,6 +46,7 @@
     "ppt_source = 'SQS_RR_UTC/TSYS/TIMESERVER:outputBunchPattern'\n",
     "ignore_fel = False  # Ignore any FEL entries in the PPT.\n",
     "ignore_ppl = False  # Ignore any PPL entries in the PPT.\n",
    "trailing_trigger = True  # Append an extra trigger after all regular pulses, covering the remainder of the trace.\n",
     "ppl_offset = 0  # In units of the PPT.\n",
     "laser_ppt_mask = -1  # Bit mask for used laser, negative to auto-detect from instrument. \n",
     "instrument_sase = 3  # Which SASE we're running at for PPT decoding.\n",
@@ -316,6 +317,10 @@
     "\n",
     "    # Fill any missing values with the highest.\n",
     "    pulse_counts[has_ppt == False] = pulse_counts.max()\n",
+    "    \n",
+    "    if trailing_trigger:\n",
    "        # Add a single count to every train for the trailing trigger.\n",
+    "        pulse_counts += 1\n",
     "\n",
     "    # Compute offsets based on pulse counts.\n",
     "    pulse_offsets = np.zeros_like(pulse_counts)\n",
@@ -375,98 +380,84 @@
     "\n",
     "clock_factor = remi['digitizer']['clock_factor']\n",
     "\n",
+    "min_trace_len = min([\n",
+    "    dc[src, key].entry_shape[0] for det_name in remi['detector'].keys()\n",
+    "    for src, key in remi.get_detector_sourcekeys(det_name)\n",
+    "])\n",
+    "\n",
     "def trigger_by_ppt(worker_id, index, train_id, ppt):\n",
     "    all_pos, fel_pos, ppl_pos = get_pulse_positions(ppt, instrument_sase, laser_ppt_mask, ppl_offset)\n",
     "    num_pulses = len(all_pos)\n",
     "    \n",
-    "    if num_pulses == 0:\n",
-    "        return\n",
-    "    elif len(ppl_pos) == 0 and ppl_offset < 0:\n",
-    "        # No PPL pulses, but a negative offset is configured. This will cause\n",
-    "        # first_pulse_offset to start early and most likely miss pulses at the\n",
-    "        # end, so we correct by adding the ppl_offset to relative positions\n",
-    "        # when computing trace positions.\n",
-    "        pos_corr = abs(ppl_offset)\n",
-    "    else:\n",
-    "        pos_corr = 0\n",
-    "        \n",
+    "    if num_pulses > 0:\n",
+    "        if len(ppl_pos) == 0 and ppl_offset < 0:\n",
+    "            # No PPL pulses, but a negative offset is configured. This will cause\n",
+    "            # first_pulse_offset to start early and most likely miss pulses at the\n",
+    "            # end, so we correct by adding the ppl_offset to relative positions\n",
+    "            # when computing trace positions.\n",
+    "            pos_corr = abs(ppl_offset)\n",
+    "        else:\n",
+    "            pos_corr = 0\n",
     "\n",
-    "    rel_pos = all_pos - all_pos[0]\n",
+    "        rel_pos = all_pos - all_pos[0]\n",
     "\n",
-    "    if num_pulses > 1:\n",
-    "        pulse_len = np.unique(rel_pos[1:] - rel_pos[:-1]).min()\n",
-    "    elif num_pulses == 1:\n",
-    "        pulse_len = single_pulse_length\n",
+    "        if num_pulses > 1:\n",
+    "            pulse_len = np.unique(rel_pos[1:] - rel_pos[:-1]).min()\n",
+    "        elif num_pulses == 1:\n",
+    "            pulse_len = single_pulse_length\n",
     "\n",
-    "    start_frac = first_pulse_offset + (rel_pos + pos_corr) * 2 * clock_factor\n",
-    "    start_int = start_frac.astype(int)\n",
+    "        start_frac = first_pulse_offset + (rel_pos + pos_corr) * 2 * clock_factor\n",
+    "        start_int = start_frac.astype(int)\n",
     "\n",
-    "    pulse_offset = pulse_offsets[index]\n",
-    "    pulse_count = pulse_counts[index]\n",
+    "        train_triggers = triggers[pulse_offsets[index]:int(pulse_offsets[index]+num_pulses)]\n",
+    "        train_triggers['start'] = start_int + pulse_start_offset\n",
+    "        train_triggers['stop'] = start_int + int(pulse_len * 2 * clock_factor) - 1 + pulse_end_offset\n",
+    "        train_triggers['offset'] = start_frac - start_int\n",
+    "        train_triggers['pulse'] = all_pos.astype(np.int16)\n",
+    "        train_triggers['fel'] = [pos in fel_pos for pos in all_pos]\n",
+    "        train_triggers['ppl'] = [pos in ppl_pos for pos in all_pos]\n",
     "        \n",
-    "    train_triggers = triggers[pulse_offset:pulse_offset+pulse_count]\n",
-    "    train_triggers['start'] = start_int + pulse_start_offset\n",
-    "    train_triggers['stop'] = start_int + int(pulse_len * 2 * clock_factor) - 1 + pulse_end_offset\n",
-    "    train_triggers['offset'] = start_frac - start_int\n",
-    "    train_triggers['pulse'] = all_pos.astype(np.int16)\n",
-    "    train_triggers['fel'] = [pos in fel_pos for pos in all_pos]\n",
-    "    train_triggers['ppl'] = [pos in ppl_pos for pos in all_pos]\n",
-    "\n",
-    "    \n",
-    "if ignore_fel and ignore_ppl:\n",
-    "    # Both FEL and PPL are ignored, use virtual full train triggers.\n",
-    "    print('WARNING: Both FEL and PPL pulses are ignored, '\n",
-    "          'virtual trigger is inserted covering the entire train')\n",
-    "    \n",
-    "    # Overwrite global pulse statistics computed before,\n",
-    "    num_pulses = len(dc.train_ids)\n",
-    "    triggers = np.empty(num_pulses, dtype=trigger_dt)\n",
-    "    \n",
-    "    pulse_counts[:] = 1\n",
-    "    pulse_counts = pulse_counts.astype(np.int32)\n",
-    "    pulse_offsets = np.arange(len(pulse_counts)).astype(np.int32)\n",
-    "\n",
-    "    # Obtain minimal trace length.\n",
-    "    min_trace_len = min([\n",
-    "        dc[src, key].entry_shape[0]\n",
-    "        for det_name in remi['detector'].keys()\n",
-    "        for src, key in remi.get_detector_sourcekeys(det_name)\n",
-    "    ])\n",
-    "\n",
-    "    triggers['start'] = first_pulse_offset\n",
-    "    triggers['stop'] = min_trace_len\n",
-    "    triggers['offset'] = 0.0\n",
-    "    triggers['pulse'] = -1\n",
-    "    triggers['fel'] = False\n",
-    "    triggers['ppl'] = False    \n",
-    "    \n",
-    "else:\n",
-    "    with timing('find_triggers'):\n",
-    "        psh.map(trigger_by_ppt, ppt_data)\n",
+    "        last_sample = train_triggers['stop'].max()\n",
+    "        \n",
+    "    else:\n",
+    "        last_sample = first_pulse_offset\n",
+    "        \n",
+    "    if trailing_trigger:\n",
+    "        # Add trailing trigger if required.\n",
+    "        trigger = triggers[int(pulse_offsets[index]+pulse_counts[index]-1)]\n",
+    "        trigger['start'] = last_sample\n",
+    "        trigger['stop'] = min_trace_len\n",
+    "        trigger['offset'] = 0.0\n",
+    "        trigger['pulse'] = -1\n",
+    "        trigger['fel'] = False\n",
+    "        trigger['ppl'] = False\n",
+    "\n",
+    "with timing('find_triggers'):\n",
+    "    psh.map(trigger_by_ppt, ppt_data)\n",
     "    \n",
-    "    if (np.unique(triggers['pulse'][1:] - triggers['pulse'][:-1]) > 0).sum() > 1:\n",
-    "        # There is more than one delta between pulse entries across all pulses. This is not\n",
-    "        # necessarily a problem, as the pattern could simply have changed in between trains\n",
-    "        # with each train being split properly.\n",
-    "        # If there's more than one delta in a single train, this likely points to a mismatch\n",
-    "        # of FEL and PPL repetition rate. This is most likely not intended.\n",
-    "\n",
-    "        one = np.uint64(1)  # Because np.uint64 + int = np.float64\n",
-    "        pulse_deltas = set()\n",
-    "\n",
-    "        for pulse_id, (offset, count) in enumerate(zip(pulse_offsets, pulse_counts)):\n",
-    "            deltas = triggers['pulse'][offset+one:offset+count] - triggers['pulse'][offset:offset+count-one]\n",
-    "\n",
-    "            if len(np.unique(deltas)) > 1:\n",
-    "                for delta in deltas:\n",
-    "                    pulse_deltas.add(delta)\n",
-    "\n",
-    "        if len(pulse_deltas) > 1:\n",
-    "            delta_str = ', '.join([str(x) for x in sorted(pulse_deltas)])\n",
-    "            warning(f'Different pulse lengths (PPT: {delta_str}) encountered within single trains, '\n",
-    "                    f'separated pulse spectra may split up signals!')\n",
-    "        else:\n",
-    "            warning('Different pulse lengths encountered across trains, separation may be unstable!')"
+    "if (np.unique(triggers['pulse'][1:] - triggers['pulse'][:-1]) > 0).sum() > 1:\n",
+    "    # There is more than one delta between pulse entries across all pulses. This is not\n",
+    "    # necessarily a problem, as the pattern could simply have changed in between trains\n",
+    "    # with each train being split properly.\n",
+    "    # If there's more than one delta in a single train, this likely points to a mismatch\n",
+    "    # of FEL and PPL repetition rate. This is most likely not intended.\n",
+    "\n",
+    "    one = np.uint64(1)  # Because np.uint64 + int = np.float64\n",
+    "    pulse_deltas = set()\n",
+    "\n",
+    "    for pulse_id, (offset, count) in enumerate(zip(\n",
+    "        pulse_offsets, pulse_counts - one if trailing_trigger else pulse_counts\n",
+    "    )):\n",
+    "        deltas = triggers['pulse'][offset+one:offset+count] - triggers['pulse'][offset:offset+count-one]\n",
+    "\n",
+    "        if len(np.unique(deltas)) > 1:\n",
+    "            for delta in deltas:\n",
+    "                pulse_deltas.add(delta)\n",
+    "\n",
+    "    if len(pulse_deltas) > 1:\n",
+    "        delta_str = ', '.join([str(x) for x in sorted(pulse_deltas)])\n",
+    "        warning(f'Different pulse lengths (PPT: {delta_str}) encountered within single trains, '\n",
+    "                f'separated pulse spectra may split up signals!')"
    ]
   },
   {