diff --git a/src/toolbox_scs/detectors/digitizers.py b/src/toolbox_scs/detectors/digitizers.py
index 04c17f5005303cc53187e31950c4e1e081fcc028..7fe7c3cd2b43770231a83ea45631e8e58385fc74 100644
--- a/src/toolbox_scs/detectors/digitizers.py
+++ b/src/toolbox_scs/detectors/digitizers.py
@@ -143,7 +143,7 @@ def peaks_from_apd(array, params, digitizer, bpt, bunchPattern):
                     f'but the array length is only {array.sizes[arr_dim]}.')
         npulses_apd = array.sizes[arr_dim]
     mask = is_pulse_at(bpt, bunchPattern).rename({'pulse_slot': pulse_dim})
-    mask = mask.sel(trainId=array.trainId)
+    mask = mask.where(mask.trainId.isin(array.trainId), drop=True)
     mask = mask.assign_coords({pulse_dim: np.arange(bpt.sizes['pulse_slot'])})
     pid = np.sort(np.unique(np.where(mask)[1]))
     npulses_bpt = len(pid)
@@ -152,7 +152,8 @@ def peaks_from_apd(array, params, digitizer, bpt, bunchPattern):
     if len(np.intersect1d(apd_coords, pid, assume_unique=True)) < npulses_bpt:
         log.warning('Not all pulses were recorded. The digitizer '
                     'was set to record pulse ids '
-                    f'{apd_coords[apd_coords<2700]} but the bunch pattern for'
+                    f'{apd_coords[apd_coords<bpt.sizes["pulse_slot"]]} but the'
+                    ' bunch pattern for'
                     f' {bunchPattern} is {pid}. Skipping pulse ID alignment.')
         noalign = True
     array = array.isel({arr_dim: slice(0, npulses_apd)})
@@ -285,7 +286,6 @@ def get_peaks(run,
 
     # 2. Use raw data from digitizer
     # minimum pulse period @ 4.5MHz, according to digitizer type
-    digitizer = digitizer_type(source=source)
     min_distance = 1
     if digitizer == 'FastADC':
         min_distance = 24
@@ -727,7 +727,7 @@ def check_peak_params(run, mnemonic, raw_trace=None, ntrains=200, params=None,
         log.warning('The digitizer did not record peak-integrated data.')
     if not plot:
         return params
-    digitizer = digitizer_type(mnemonic, run_mnemonics)
+    digitizer = digitizer_type(run, run_mnemonics[mnemonic]['source'].split(':')[0])
     min_distance = 24 if digitizer == "FastADC" else 440
     if 'bunchPatternTable' in run_mnemonics and bunchPattern != 'None':
         sel = run.select_trains(np.s_[:ntrains])
@@ -829,27 +829,34 @@ def plotPeakIntegrationWindow(raw_trace, params, bp_params, show_all=False):
     return fig, ax
 
 
-def digitizer_type(mnemonic=None, mnemo_dict=None, source=None):
-    if mnemonic is not None:
-        source = mnemo_dict[mnemonic]['source']
-    if ':channel' in source:
-        return 'FastADC'
-    if ':output' in source:
-        return 'FastADC'
-    if ':network' in source:
-        return 'ADQ412'
-    dic = {'XTD10_MCP': 'FastADC',
-           'FastADC': 'FastADC',
-           'PES': 'ADQ412',
-           'MCP': 'ADQ412'}
-    for k, v in dic.items():
-        if k in mnemonic:
-            return v
-    log.warning(f'Could not find digitizer type from mnemonic {mnemonic}.')
-    return 'ADQ412'
-
-
-def get_tim_peaks(run, mnemonics=None, merge_with=None,
+def digitizer_type(run, source):
+    """
+    Finds the digitizer type based on the class Id / name of the source.
+    Example source: 'SCS_UTC1_MCP/ADC/1'. Defaults to ADQ412 if not found.
+    """
+    ret = None
+    if '_MCP/ADC/1' in source:
+        ret = 'FastADC'
+    if '_ADQ/ADC/1' in source:
+        ret = 'ADQ412'
+    if ret is None:
+        digi_dict = {'FastAdc': 'FastADC',
+                     'FastAdcLegacy': 'FastADC',
+                     'AdqDigitizer': 'ADQ412',
+                     'PyADCChannel': 'FastADC',
+                     'PyADCChannelLegacy': 'FastADC'
+                    }
+        try:
+            classId = run.get_run_value(source, 'classId.value')
+            ret = digi_dict.get(classId)
+        except Exception as e:
+            log.warning(str(e))
+            log.warning(f'Could not find digitizer type from source {source}.')
+            ret = 'ADQ412'
+    return ret
+
+
+def get_tim_peaks(run, mnemonic=None, merge_with=None,
                   bunchPattern='sase3', integParams=None,
                   keepAllSase=False):
     """
@@ -862,10 +869,8 @@ def get_tim_peaks(run, mnemonics=None, merge_with=None,
     ----------
     run: extra_data.DataCollection
         DataCollection containing the digitizer data.
-    mnemonics: str or list of str
-        mnemonics for TIM, e.g. "MCP2apd" or ["MCP2apd", "MCP3raw"].
-        If None, defaults to "MCP2apd" in case no merge_with dataset
-        is provided.
+    mnemonic: str
+        mnemonic for TIM, e.g. "MCP2apd".
     merge_with: xarray Dataset
         If provided, the resulting Dataset will be merged with this
         one. The TIM variables of merge_with (if any) will also be
@@ -885,16 +890,16 @@ def get_tim_peaks(run, mnemonics=None, merge_with=None,
 
     Returns
     -------
-    xarray Dataset with all TIM variables substituted by
+    xarray Dataset with TIM variables substituted by
     the peak caclulated values (e.g. "MCP2raw" becomes
     "MCP2peaks"), merged with Dataset *merge_with* if provided.
     """
-    return get_digitizer_peaks(run, mnemonics, merge_with,
+    return get_digitizer_peaks(run, mnemonic, merge_with,
                                bunchPattern, integParams,
                                keepAllSase)
 
 
-def get_laser_peaks(run, mnemonics=None, merge_with=None,
+def get_laser_peaks(run, mnemonic=None, merge_with=None,
                     bunchPattern='scs_ppl', integParams=None):
     """
     Extracts laser photodiode signal (peak intensity) from Fast ADC
@@ -906,10 +911,9 @@ def get_laser_peaks(run, mnemonics=None, merge_with=None,
     ----------
     run: extra_data.DataCollection
         DataCollection containing the digitizer data.
-    mnemonics: str or list of str
-        mnemonics for FastADC corresponding to laser signal, e.g.
-        "FastADC2peaks" or ["FastADC2raw", "FastADC3peaks"]. If None,
-        defaults to "MCP2apd" in case no merge_with dataset is provided.
+    mnemonic: str
+        mnemonic for FastADC corresponding to laser signal, e.g.
+        "FastADC2peaks" or "I0_ILHraw".
     merge_with: xarray Dataset
         If provided, the resulting Dataset will be merged with this
         one. The FastADC variables of merge_with (if any) will also be
@@ -930,16 +934,16 @@ def get_laser_peaks(run, mnemonics=None, merge_with=None,
     the peak caclulated values (e.g. "FastADC2raw" becomes
     "FastADC2peaks").
     """
-    return get_digitizer_peaks(run, mnemonics, merge_with,
+    return get_digitizer_peaks(run, mnemonic, merge_with,
                                bunchPattern, integParams, False)
 
 
-def get_digitizer_peaks(run, mnemonics=None, merge_with=None,
-                        bunchPattern='None', integParams=None,
+def get_digitizer_peaks(run, mnemonic, merge_with=None,
+                        bunchPattern='sase3', integParams=None,
                         digitizer=None, keepAllSase=False):
     """
-    Automatically computes digitizer peaks. Sources can be loaded on the
-    fly via the mnemonics argument, or processed from an existing data set
+    Automatically computes digitizer peaks. A source can be loaded on the
+    fly via the mnemonic argument, or processed from an existing data set
     (merge_with). The bunch pattern table is used to assign the pulse
     id coordinates.
 
@@ -947,18 +951,14 @@ def get_digitizer_peaks(run, mnemonics=None, merge_with=None,
     ----------
     run: extra_data.DataCollection
         DataCollection containing the digitizer data.
-    mnemonics: str or list of str
-        mnemonics for FastADC or TIM, e.g. "FastADC2raw" or ["MCP2raw",
-        "MCP3apd"]. If None and no merge_with dataset is provided,
-        defaults to "MCP2apd" if digitizer is ADQ412 or
-        "FastADC5raw" if digitizer is FastADC.
+    mnemonic: str
+        mnemonic for FastADC or ADQ412, e.g. "I0_ILHraw" or "MCP3apd".
     merge_with: xarray Dataset
         If provided, the resulting Dataset will be merged with this
         one. The FastADC variables of merge_with (if any) will also be
         computed and merged.
-    bunchPattern: str
+    bunchPattern: str or dict
         'sase1' or 'sase3' or 'scs_ppl', 'None': bunch pattern
-        used to extract peaks.
     integParams: dict
         dictionnary for raw trace integration, e.g.
         {'pulseStart':100, 'pulsestop':200, 'baseStart':50,
@@ -970,48 +970,22 @@ def get_digitizer_peaks(run, mnemonics=None, merge_with=None,
 
     Returns
     -------
-    xarray Dataset with all Fast ADC variables substituted by
-    the peak caclulated values (e.g. "FastADC2raw" becomes
+    xarray Dataset with digitizer peak variables. Raw variables are
+    substituted by the peak calculated values (e.g. "FastADC2raw" becomes
     "FastADC2peaks").
     """
-    if mnemonics is None and merge_with is None:
-        raise ValueError("at least one of mnemonics or merge_with "
-                         "arguments is expected.")
-
     run_mnemonics = mnemonics_for_run(run)
-    # find digitizer type and get the list of mnemonics to process
-    def to_processed_name(name):
-        return name.replace('raw', 'peaks').replace('apd', 'peaks')
-    if mnemonics is None:
-        if digitizer is None:
-            for v in merge_with:
-                if 'FastADC2_' in v:
-                    digitizer = 'FastADC2'
-                    break
-                if 'FastADC' in v:
-                    digitizer = 'FastADC'
-                    break
-                if 'MCP' in v:
-                    digitizer = 'ADQ412'
-                    break
-        if digitizer is None:
-            log.warning(f'No array with digitizer data '
-                     'to extract. Skipping.')
-            return merge_with
-        mnemonics = mnemonics_to_process(mnemonics, merge_with,
-                                         digitizer, to_processed_name)
-    else:
-        mnemonics = [mnemonics] if isinstance(mnemonics, str) else mnemonics
-        digitizer = digitizer_type(mnemonics[0], run_mnemonics)
-
-    if len(mnemonics) == 0:
-        log.info(f'No array with unaligned {digitizer} peaks to extract. '
-                 'Skipping.')
+    if mnemonic not in run_mnemonics:
+        log.warning('Mnemonic not found in run. Skipping.')
         return merge_with
-    else:
-        log.info(f'Extracting {digitizer} peaks from {mnemonics}.')
-
+    if bool(merge_with) and mnemonic in merge_with:
+        for d in merge_with[mnemonic].dims:
+            if d in ['sa3_pId', 'ol_pId']:
+                log.warning(f'{mnemonic} already extracted. '
+                            'Skipping.')
+                return merge_with
     # check if bunch pattern table exists
+    bpt = None
     if bool(merge_with) and 'bunchPatternTable' in merge_with:
         bpt = merge_with['bunchPatternTable']
         log.debug('Using bpt from merge_with dataset.')
@@ -1024,39 +998,33 @@ def get_digitizer_peaks(run, mnemonics=None, merge_with=None,
         bpt = run.get_array(m['source'], m['key'], m['dim'])
         log.debug('Loaded bpt from DataCollection.')
     else:
-        bpt = None
-
-    # iterate over mnemonics and merge arrays in dataset
+        log.warning('Could not load bunch pattern table.')
+    # prepare resulting dataset
     if bool(merge_with):
-        mw_ds = merge_with.drop(mnemonics, errors='ignore')
+        mw_ds = merge_with.drop(mnemonic, errors='ignore')
     else:
         mw_ds = xr.Dataset()
+    # iterate over mnemonics and merge arrays in dataset
     autoFind = True if integParams is None else False
-    names = []
-    vals = []
-    for k in mnemonics:
-        useRaw = True if 'raw' in k else False
-        m = run_mnemonics[k]
-        if bool(merge_with) and k in merge_with:
-            data = merge_with[k]
-        else:
-            data = None
-        peaks = get_peaks(run, data,
-                          source=m['source'],
-                          key=m['key'],
-                          digitizer=digitizer,
-                          useRaw=useRaw,
-                          autoFind=autoFind,
-                          integParams=integParams,
-                          bunchPattern=bunchPattern,
-                          bpt=bpt)
-        name = to_processed_name(k)
-        names.append(name)
-        vals.append(peaks)
+    m = run_mnemonics[mnemonic]
+    digitizer = digitizer_type(run, m['source'].split(':')[0])
+    useRaw = True if 'raw' in mnemonic else False
+    if bool(merge_with) and mnemonic in merge_with:
+        data = merge_with[mnemonic]
+    else:
+        data = run.get_array(m['source'], m['key'], m['dim'])
+    peaks = get_peaks(run, data,
+                      source=m['source'],
+                      key=m['key'],
+                      digitizer=digitizer,
+                      useRaw=useRaw,
+                      autoFind=autoFind,
+                      integParams=integParams,
+                      bunchPattern=bunchPattern,
+                      bpt=bpt)
+    name = mnemonic.replace('raw', 'peaks').replace('apd', 'peaks')
     join = 'outer' if keepAllSase else 'inner'
-    aligned_vals = xr.align(*vals, join=join)
-    ds = xr.Dataset(dict(zip(names, aligned_vals)))
-    ds = mw_ds.merge(ds, join=join)
+    ds = mw_ds.merge(peaks.rename(name), join=join)
     return ds
 
 
diff --git a/src/toolbox_scs/load.py b/src/toolbox_scs/load.py
index 268dc9fff81e50c51c12f4270bd363bb7168f6d7..275e555a12a1fcceae21c3983974014957370aa6 100644
--- a/src/toolbox_scs/load.py
+++ b/src/toolbox_scs/load.py
@@ -238,7 +238,7 @@ def load(proposalNB=None, runNB=None,
                 if bp is None:
                     continue
             ds = tbdet.get_digitizer_peaks(
-                run, mnemonics=k, merge_with=ds, bunchPattern=bp)
+                run, mnemonic=k, merge_with=ds, bunchPattern=bp)
     if extract_xgm:
         for k, v in run_mnemonics.items():
             if k not in ds or v.get('extract') != 'XGM':