diff --git a/src/toolbox_scs/detectors/__init__.py b/src/toolbox_scs/detectors/__init__.py
index a4a9d916d827875b4048b16d89e9dbcd5d1d9924..83aecbee474cd6639cb5b6ee70a4c95501fdd9c6 100644
--- a/src/toolbox_scs/detectors/__init__.py
+++ b/src/toolbox_scs/detectors/__init__.py
@@ -1,9 +1,11 @@
+from .load_detectors import (
+    get_all_detectors)
 from .xgm import (
     load_xgm, get_xgm, cleanXGMdata, matchXgmTimPulseId, calibrateXGMs,
     autoFindFastAdcPeaks)
 from .tim import (
     load_TIM,)
-from .digitizers import(
+from .digitizers import (
     get_peaks, get_tim_peaks, get_fast_adc_peaks, check_peak_params)
 from .dssc_data import (
     save_xarray, load_xarray, get_data_formatted, save_attributes_h5)
@@ -14,10 +16,12 @@ from .dssc_processing import (
     process_dssc_data)
 from .dssc import (
     DSSCBinner, DSSCFormatter)
-from .azimuthal_integrator import AzimuthalIntegrator, AzimuthalIntegratorDSSC
+from .azimuthal_integrator import (
+    AzimuthalIntegrator, AzimuthalIntegratorDSSC)
 
 __all__ = (
     # Functions
+    "get_all_detectors",
     "load_xgm",
     "get_xgm",
     "cleanXGMdata",
@@ -67,7 +71,8 @@ clean_ns = [
     'FastCCD',
     'tim',
     'xgm',
-    'digitizers'
+    'digitizers',
+    'load_detectors'
     ]
 
 
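After this change, `get_all_detectors` is re-exported by the detectors subpackage alongside the existing helpers, so a minimal import sketch (assuming the package is installed as `toolbox_scs`) is:

```python
# Hypothetical usage sketch: the new helper is reachable from the
# subpackage namespace after this change.
from toolbox_scs.detectors import get_all_detectors
```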
diff --git a/src/toolbox_scs/detectors/digitizers.py b/src/toolbox_scs/detectors/digitizers.py
index dd4537b51dadd755f81529820ed51c9440bcc2be..9f2095c19b328bfb3ff5f36618c079d7d97f056f 100644
--- a/src/toolbox_scs/detectors/digitizers.py
+++ b/src/toolbox_scs/detectors/digitizers.py
@@ -317,7 +317,6 @@ def get_peaks(run,
         period_bpt = 0
     else:
         period_bpt = np.min(np.diff(pid))
-    print(integParams['period'])
     if autoFind and period_bpt*min_distance != integParams['period']:
         log.warning('The period from the bunch pattern is different than ' +
                     ' that found by the peak-finding algorithm. Either ' +
@@ -877,12 +876,10 @@ def get_digitizer_peaks(run, key, digitizer, merge_with,
     """
     if digitizer == 'FastADC':
         default_key = 'FastADC5raw'
-        dig_keys = [f'FastADC{c}raw' for c in range(1, 10)]
-        dig_keys += [f'FastADC{c}peaks' for c in range(1, 10)]
+        dig_keys = [k for k in _mnemonics if 'FastADC' in k]
     if digitizer == 'ADQ412':
         default_key = 'MCP2apd'
-        dig_keys = [f'MCP{c}raw' for c in range(1, 5)]
-        dig_keys += [f'MCP{c}apd' for c in range(1, 5)]
+        dig_keys = [k for k in _mnemonics if 'MCP' in k]
         dig_keys += [f'MCP{c}peaks' for c in range(1, 5)]
     if merge_with is None:
         if key is None:
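The hardcoded key lists are replaced by filtering the central mnemonics table, so newly added digitizer mnemonics are picked up automatically. A toy sketch of the filtering idiom (the `_mnemonics` entries here are hypothetical placeholders; the real table lives in `toolbox_scs.constants`):

```python
# Toy illustration of the mnemonic-based key filtering used above.
# These dictionary entries are placeholders, not the real table.
_mnemonics = {'FastADC1raw': {}, 'FastADC1peaks': {}, 'FastADC5raw': {},
              'MCP2apd': {}, 'SCS_SA3': {}}

dig_keys = [k for k in _mnemonics if 'FastADC' in k]
print(dig_keys)  # ['FastADC1raw', 'FastADC1peaks', 'FastADC5raw']
```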
diff --git a/src/toolbox_scs/detectors/load_detectors.py b/src/toolbox_scs/detectors/load_detectors.py
new file mode 100644
index 0000000000000000000000000000000000000000..a42a182418d0ac0e6c344c258f7672aa9503f68b
--- /dev/null
+++ b/src/toolbox_scs/detectors/load_detectors.py
@@ -0,0 +1,46 @@
+""" Detectors related sub-routines
+
+    Copyright (2021) SCS Team.
+
+    (contributions preferrably comply with pep8 code structure
+    guidelines.)
+"""
+
+from ..constants import mnemonics as _mnemonics
+from .digitizers import get_tim_peaks, get_fast_adc_peaks
+from .xgm import get_xgm
+
+
+def get_all_detectors(run, data, tim_bp='sase3', fadc_bp='scs_ppl'):
+    """
+    From raw data arrays, extracts all the data from pulse-resolved
+    detectors (xgm, tim, optical laser photodiodes, etc...).
+    This function calls individual functions specific to each detector
+    with default parameters, it may not work in every situation.
+
+    Parameters
+    ----------
+    run: extra_data.DataCollection
+        DataCollection containing the digitizer data.
+    data: xarray Dataset
+        dataset containing the unprocessed arrays.
+    tim_bp: str
+        {'sase1', 'sase3', 'scs_ppl'} bunch pattern used to extract TIM
+        peaks. See get_tim_peaks for details.
+    fadc_bp: str
+        {'sase1', 'sase3', 'scs_ppl'} bunch pattern used to extract fast
+        ADC peaks. See get_fast_adc_peaks for details.
+
+    Returns
+    -------
+    ds: xarray Dataset
+        the input dataset merged with the extracted detector peak data.
+    """
+    tim = [k for k in _mnemonics if 'MCP' in k and k in data]
+    fadc = [k for k in _mnemonics if 'FastADC' in k and k in data]
+    xgm = [k for k in _mnemonics if ('_SA3' in k or '_SA1' in k
+           or '_XGM' in k) and k in data]
+    ds = get_tim_peaks(run, key=tim, merge_with=data, bunchPattern=tim_bp)
+    ds = get_fast_adc_peaks(run, key=fadc, merge_with=ds, bunchPattern=fadc_bp)
+    ds = get_xgm(run, key=xgm, merge_with=ds)
+    return ds
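A sketch of the intended workflow, assuming `load` is exposed at the package top level (as the other toolbox entry points appear to be) and that the proposal/run numbers, which are placeholders here, point at accessible data:

```python
# Hypothetical end-to-end sketch: load the raw traces, then extract
# all pulse-resolved detector data with default parameters.
import toolbox_scs as tb
from toolbox_scs.detectors import get_all_detectors

run, data = tb.load(2212, 208, fields=['MCP2apd', 'FastADC5raw', 'SCS_SA3'])
ds = get_all_detectors(run, data, tim_bp='sase3', fadc_bp='scs_ppl')
```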
diff --git a/src/toolbox_scs/load.py b/src/toolbox_scs/load.py
index c9735342fdd4649036eb3c18b7cb7cdb3eb4f616..83fc9fb43cfa6c0682184198dfbc5a43df950130 100644
--- a/src/toolbox_scs/load.py
+++ b/src/toolbox_scs/load.py
@@ -15,21 +15,20 @@ import xarray as xr
 
 import extra_data as ed
 
-from .misc.bunch_pattern import extractBunchPattern
-from .constants import mnemonics as _mnemonics_ld
+from .constants import mnemonics as _mnemonics
 from .util.exceptions import ToolBoxValueError, ToolBoxPathError
 from .util.data_access import find_run_dir
 
 log = logging.getLogger(__name__)
 
 
-def load(fields, runNB, proposalNB,
+def load(proposalNB=None, runNB=None,
+         fields=None,
          subFolder='raw',
          display=False,
          validate=False,
          subset=ed.by_index[:],
-         rois={},
-         useBPTable=True):
+         rois={}):
     """
     Load a run and extract the data. Output is an xarray with aligned
     trainIds
@@ -37,16 +36,17 @@ def load(fields, runNB, proposalNB,
     Parameters
     ----------
 
+    proposalNB: (str, int)
+        proposal number, e.g. 'p002252' or 2252
+    runNB: (str, int)
+        run number, as integer or string
     fields: list of strings, list of dictionaries
         list of mnemonic strings to load specific data such as "fastccd",
         "SCS_XGM", or dictionnaries defining a custom mnemonic such as
             {"extra":
-                {'SCS_CDIFFT_MAG/SUPPLY/CURRENT',
-                 'actual_current.value', None}}
-    runNB: (str, int)
-        run number as integer
-    proposalNB: (str, int)
-        of the proposal number e.g. 'p002252' or 2252
+                {'source': 'SCS_CDIFFT_MAG/SUPPLY/CURRENT',
+                 'key': 'actual_current.value',
+                 'dim': None}}
     subFolder: (str)
         sub-folder from which to load the data. Use 'raw' for raw data
         or 'proc' for processed data.
@@ -60,17 +60,10 @@ def load(fields, runNB, proposalNB,
     rois: dictionary
         a dictionnary of mnemonics with a list of rois definition and
         the desired names, for example:
-            {'fastccd':
-                {'ref':
-                    {'roi': by_index[730:890, 535:720],
-                     'dim': ['ref_x', 'ref_y']},
-                 'sam':
-                    {'roi':by_index[1050:1210, 535:720],
-                     'dim': ['sam_x', 'sam_y']}}}
-    useBPTable: boolean
-        If True, uses the raw bunch pattern table to extract sase pulse number
-        and indices in the trains. If false, load the data from BUNCH_DECODER
-        middle layer device.
+            {'fastccd': {'ref': {'roi': by_index[730:890, 535:720],
+                                 'dim': ['ref_x', 'ref_y']},
+                         'sam': {'roi':by_index[1050:1210, 535:720],
+                                 'dim': ['sam_x', 'sam_y']}}}
 
     Returns
     -------
@@ -82,8 +75,9 @@ def load(fields, runNB, proposalNB,
     except ToolBoxPathError as err:
         log.error(f"{err.message}")
         raise
-
     run = ed.RunDirectory(runFolder).select_trains(subset)
+    if fields is None:
+        return run, xr.Dataset()
 
     if validate:
         # get_ipython().system('extra-data-validate ' + runFolder)
@@ -96,25 +90,14 @@ def load(fields, runNB, proposalNB,
     vals = []
 
     # load pulse pattern info
-    if useBPTable:
-        bp_mnemo = _mnemonics_ld['bunchPatternTable']
-        if bp_mnemo['source'] not in run.all_sources:
-            print('Source {} not found in run. Skipping!'.format(
-                                _mnemonics_ld['bunchPatternTable']['source']))
-        else:
-            bp_table = run.get_array(bp_mnemo['source'], bp_mnemo['key'],
-                                     extra_dims=bp_mnemo['dim'])
-            sase1, npulses_sase1, dummy = extractBunchPattern(
-                                              bp_table, 'sase1')
-            sase3, npulses_sase3, dummy = extractBunchPattern(
-                                              bp_table, 'sase3')
-            keys += ["sase1", "npulses_sase1", "sase3", "npulses_sase3"]
-            vals += [sase1, npulses_sase1, sase3, npulses_sase3]
+    if _mnemonics['bunchPatternTable']['source'] in run.all_sources:
+        bpt = run.get_array(*_mnemonics['bunchPatternTable'].values())
+        keys.append("bunchPatternTable")
+        vals.append(bpt)
     else:
-        fields += ["sase1", "sase3", "npulses_sase3", "npulses_sase1"]
-
+        print('Source {} not found in run. Skipping!'.format(
+                            _mnemonics['bunchPatternTable']['source']))
     for f in fields:
-
         if type(f) == dict:
             # extracting mnemomic defined on the spot
             if len(f.keys()) > 1:
@@ -124,27 +107,22 @@ def load(fields, runNB, proposalNB,
             v = f[k]
         else:
             # extracting mnemomic from the table
-            if f in _mnemonics_ld:
-                v = _mnemonics_ld[f]
+            if f in _mnemonics:
+                v = _mnemonics[f]
                 k = f
             else:
-                print('Unknow mnemonic "{}". Skipping!'.format(f))
+                print(f'Unknown mnemonic "{f}". Skipping!')
                 continue
-
         if k in keys:
             continue  # already loaded, skip
-
         if display:
             print('Loading {}'.format(k))
-
         if v['source'] not in run.all_sources:
             print('Source {} not found in run. Skipping!'.format(v['source']))
             continue
-
         if k not in rois:
             # no ROIs selection, we read everything
-            vals.append(run.get_array(v['source'], v['key'],
-                                      extra_dims=v['dim']))
+            vals.append(run.get_array(*v.values()))
             keys.append(k)
         else:
             # ROIs selection, for each ROI we select a region of the data and
@@ -158,9 +136,8 @@ def load(fields, runNB, proposalNB,
     aligned_vals = xr.align(*vals, join='inner')
     result = dict(zip(keys, aligned_vals))
     result = xr.Dataset(result)
-    result.attrs['run'] = run
     result.attrs['runFolder'] = runFolder
-    return result
+    return run, result
 
 
 def load_run(proposal, run, **kwargs):
@@ -168,7 +145,7 @@ def load_run(proposal, run, **kwargs):
     Get run in given proposal
 
     Wraps the extra_data open_run routine, out of convenience for the toolbox
-    user. More information can be found in the karabo_data documentation.
+    user. More information can be found in the extra_data documentation.
 
     Parameters
     ----------
@@ -280,8 +257,8 @@ def get_array(run, mnemonic_key=None, stepsize=None):
             data = xr.DataArray(
                         np.ones(len(run.train_ids), dtype=np.int16),
                         dims=['trainId'], coords={'trainId': run.train_ids})
-        elif mnemonic_key in _mnemonics_ld:
-            mnem = _mnemonics_ld[mnemonic_key]
+        elif mnemonic_key in _mnemonics:
+            mnem = _mnemonics[mnemonic_key]
             data = run.get_array(*mnem.values())
         else:
             raise ToolBoxValueError("Invalid mnemonic", mnemonic_key)
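Two behavioral changes are worth noting: `load` now returns the extra_data DataCollection alongside the dataset (instead of storing it in `result.attrs['run']`), and the raw bunch pattern table is attached as a `bunchPatternTable` variable (when its source is present) rather than pre-extracted sase1/sase3 arrays. A sketch of adapting downstream code, assuming `extractBunchPattern` (which the old code imported from `toolbox_scs.misc.bunch_pattern`) remains the way to decode the table:

```python
# Hedged sketch of the new calling convention; proposal/run numbers
# are placeholders. Omitting `fields` returns (run, empty Dataset).
from toolbox_scs.load import load
from toolbox_scs.misc.bunch_pattern import extractBunchPattern

run, ds = load(2212, 208, fields=['SCS_SA3'])
# Decode sase3 pulse ids from the raw table, as the old code did
# internally (this downstream call is our assumption):
sase3, npulses_sase3, _ = extractBunchPattern(ds['bunchPatternTable'],
                                              'sase3')
```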
diff --git a/src/toolbox_scs/routines/XAS.py b/src/toolbox_scs/routines/XAS.py
index 0880a0c08e26b09ca0071bded81e7d4d0e8e8237..1c9ca1758f2f6f15e30b75ba21224007127c30db 100644
--- a/src/toolbox_scs/routines/XAS.py
+++ b/src/toolbox_scs/routines/XAS.py
@@ -14,6 +14,7 @@ import matplotlib.gridspec as gridspec
 import matplotlib.pyplot as plt
 import re
 
+
 def absorption(T, Io, fluorescence=False):
     """ Compute the absorption A = -ln(T/Io) (or A = T/Io
         for fluorescence)
@@ -49,11 +50,11 @@ def absorption(T, Io, fluorescence=False):
     Io = Io[good]
     # return type of the structured array
     fdtype = [('muA', 'f8'), ('sigmaA', 'f8'), ('weights', 'f8'),
-        ('muT', 'f8'), ('sigmaT', 'f8'), ('muIo', 'f8'), ('sigmaIo', 'f8'),
-        ('p', 'f8'), ('counts', 'i8')]
+              ('muT', 'f8'), ('sigmaT', 'f8'), ('muIo', 'f8'),
+              ('sigmaIo', 'f8'), ('p', 'f8'), ('counts', 'i8')]
     if counts == 0:
         return np.array([(np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN,
-            np.NaN, np.NaN, 0)], dtype=fdtype)
+                          np.NaN, np.NaN, 0)], dtype=fdtype)
 
     muT = np.mean(T)
     sigmaT = np.std(T)
@@ -62,29 +63,30 @@ def absorption(T, Io, fluorescence=False):
     sigmaIo = np.std(Io)
     weights = np.sum(Io)
 
-    p = np.corrcoef(T, Io)[0,1]
-    
+    p = np.corrcoef(T, Io)[0, 1]
+
     # weighted average of T/Io with Io as weights
-    muA = muT/muIo
-    
-    #Derivation of standard deviation
-    #1. using biased weighted sample variance:
-    #sigmaA = np.sqrt(np.average((T/Io - muA)**2, weights=Io))
+    muA = muT / muIo
+
+    # Derivation of standard deviation
+    # 1. using biased weighted sample variance:
+    # sigmaA = np.sqrt(np.average((T/Io - muA)**2, weights=Io))
 
-    #2. using unbiased weighted sample variance (reliablility weights):
+    # 2. using unbiased weighted sample variance (reliability weights):
     V2 = np.sum(Io**2)
     sigmaA = np.sqrt(np.sum(Io*(T/Io - muA)**2) / (weights - V2/weights))
 
-    #3. using error propagation for correlated data:
-    #sigmaA = np.abs(muA)*(np.sqrt((sigmaT/muT)**2 +
-    #    (sigmaIo/muIo)**2 - 2*p*sigmaIo*sigmaT/(muIo*muT)))
-    
+    # 3. using error propagation for correlated data:
+    # sigmaA = np.abs(muA)*(np.sqrt((sigmaT/muT)**2 +
+    #          (sigmaIo/muIo)**2 - 2*p*sigmaIo*sigmaT/(muIo*muT)))
+
     if not fluorescence:
-        sigmaA = sigmaA/np.abs(muA)
+        sigmaA = sigmaA / np.abs(muA)
         muA = -np.log(muA)
-    
+
     return np.array([(muA, sigmaA, weights, muT, sigmaT, muIo, sigmaIo,
-        p, counts)], dtype=fdtype)
+                      p, counts)], dtype=fdtype)
+
 
 def binning(x, data, func, bins=100, bin_length=None):
     """ General purpose 1-dimension data binning
@@ -111,16 +113,17 @@ def binning(x, data, func, bins=100, bin_length=None):
         bins = np.linspace(bin_start, bin_end, bins)
     bin_centers = (bins[1:]+bins[:-1])/2
     nb_bins = len(bin_centers)
-    
+
     bin_idx = np.digitize(x, bins)
     dummy = func([])
     res = np.empty((nb_bins), dtype=dummy.dtype)
     for k in range(nb_bins):
-        res[k] = func(data[k+1==bin_idx])
+        res[k] = func(data[k+1 == bin_idx])
 
     return bins, res
 
-def xas(nrun, bins=None, Iokey='SCS_SA3', Itkey='MCP3apd', nrjkey='nrj', 
+
+def xas(nrun, bins=None, Iokey='SCS_SA3', Itkey='MCP3peaks', nrjkey='nrj',
         Iooffset=0, plot=False, fluorescence=False):
     """ Compute the XAS spectra from a xarray nrun.
 
@@ -145,21 +148,21 @@ def xas(nrun, bins=None, Iokey='SCS_SA3', Itkey='MCP3apd', nrjkey='nrj',
                 muIo: the mean of the Io
                 counts: the number of events in each bin
     """
-    
-    stacked = nrun.stack(dummy_=['trainId','sa3_pId'])
+
+    stacked = nrun.stack(dummy_=['trainId', 'sa3_pId'])
 
     Io = stacked[Iokey].values + Iooffset
     It = stacked[Itkey].values
     nrj = stacked[nrjkey].values
-    
+
     names_list = ['nrj', 'Io', 'It']
     rundata = np.vstack((nrj, Io, It))
     rundata = np.rec.fromarrays(rundata, names=names_list)
-    
+
     def whichIo(data):
         """ Select which fields to use as I0 and which to use as I1
         """
-        
+
         if 'mcp' in Iokey.lower():
             Io_sign = -1
         else:
@@ -170,11 +173,11 @@ def xas(nrun, bins=None, Iokey='SCS_SA3', Itkey='MCP3apd', nrjkey='nrj',
         else:
             It_sign = 1
 
-        
         if len(data) == 0:
             return absorption([], [], fluorescence)
         else:
-            return absorption(It_sign*data['It'], Io_sign*data['Io'], fluorescence)
+            return absorption(It_sign*data['It'], Io_sign*data['Io'],
+                              fluorescence)
 
     if bins is None:
         num_bins = 80
@@ -186,16 +189,15 @@ def xas(nrun, bins=None, Iokey='SCS_SA3', Itkey='MCP3apd', nrjkey='nrj',
     elif type(bins) == float:
         energy_limits = [np.min(nrj), np.max(nrj)]
         bins = np.arange(energy_limits[0], energy_limits[1], bins)
-        
+
     dummy, nosample = binning(rundata['nrj'], rundata, whichIo, bins)
     muA = nosample['muA']
-    sterrA = nosample['sigmaA']/np.sqrt(nosample['counts'])
-    
+    sterrA = nosample['sigmaA'] / np.sqrt(nosample['counts'])
+
     bins_c = 0.5*(bins[1:] + bins[:-1])
-    bin_idx = np.digitize(rundata['nrj'], bins)
     if plot:
-        f = plt.figure(figsize=(6.5,6))
-        gs = gridspec.GridSpec(2,1,height_ratios=[4,1])
+        f = plt.figure(figsize=(6.5, 6))
+        gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1])
         ax1 = plt.subplot(gs[0])
         ax1.plot(bins_c, muA, color='C1', label=r'$\sigma$')
         if fluorescence:
@@ -205,25 +207,28 @@ def xas(nrun, bins=None, Iokey='SCS_SA3', Itkey='MCP3apd', nrjkey='nrj',
         ax1.set_xlabel('Energy (eV)')
         ax1.legend()
         ax1_twin = ax1.twinx()
-        ax1_twin.bar(bins_c, nosample['muIo'], width=0.80*(bins_c[1]-bins_c[0]),
-                color='C1', alpha=0.2)
+        ax1_twin.bar(bins_c, nosample['muIo'],
+                     width=0.80*(bins_c[1]-bins_c[0]), color='C1', alpha=0.2)
         ax1_twin.set_ylabel('Io')
         try:
-            proposalNB=int(re.findall(r'p(\d{6})', nrun.attrs['runFolder'])[0])
-            runNB=int(re.findall(r'r(\d{4})', nrun.attrs['runFolder'])[0])
+            proposalNB = int(re.findall(r'p(\d{6})',
+                                        nrun.attrs['runFolder'])[0])
+            runNB = int(re.findall(r'r(\d{4})', nrun.attrs['runFolder'])[0])
             ax1.set_title(f'run {runNB} p{proposalNB}')
         except:
             f.suptitle(nrun.attrs['plot_title'])
-        
+
         ax2 = plt.subplot(gs[1])
         ax2.bar(bins_c, nosample['counts'], width=0.80*(bins_c[1]-bins_c[0]),
                 color='C0', alpha=0.2)
         ax2.set_xlabel('Energy (eV)')
         ax2.set_ylabel('counts')
         plt.tight_layout()
-    
-    return {'nrj':bins_c, 'muA':muA, 'sterrA':sterrA, 'sigmaA':nosample['sigmaA'],
-        'muIo':nosample['muIo'], 'counts':nosample['counts']}
+
+    return {'nrj': bins_c, 'muA': muA, 'sterrA': sterrA,
+            'sigmaA': nosample['sigmaA'], 'muIo': nosample['muIo'],
+            'counts': nosample['counts']}
+
 
 def xasxmcd(dataP, dataN):
     """ Compute XAS and XMCD from data with both magnetic field direction
@@ -237,7 +242,8 @@ def xasxmcd(dataP, dataN):
     """
 
     assert len(dataP) == len(dataN), "binned datasets must be of same lengths"
-    assert not np.any(dataP['nrj'] - dataN['nrj']), "Energy points for dataP and dataN should be the same"
+    assert not np.any(dataP['nrj'] - dataN['nrj']), "Energy points for " \
+        "dataP and dataN should be the same"
 
     muXAS = dataP['muA'] + dataN['muA']
     muXMCD = dataP['muA'] - dataN['muA']
@@ -245,8 +251,9 @@ def xasxmcd(dataP, dataN):
     # standard error is the same for XAS and XMCD
     sigma = np.sqrt(dataP['sterrA']**2 + dataN['sterrA']**2)
 
-    res = np.empty(len(muXAS), dtype=[('nrj', 'f8'), ('muXAS', 'f8'), ('sigmaXAS', 'f8'),
-        ('muXMCD', 'f8'), ('sigmaXMCD', 'f8')])
+    res = np.empty(len(muXAS), dtype=[('nrj', 'f8'), ('muXAS', 'f8'),
+                                      ('sigmaXAS', 'f8'), ('muXMCD', 'f8'),
+                                      ('sigmaXMCD', 'f8')])
     res['nrj'] = dataP['nrj']
     res['muXAS'] = muXAS
     res['muXMCD'] = muXMCD
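Since the default `Itkey` moved from 'MCP3apd' to 'MCP3peaks', existing callers may need to pass the old key explicitly. A self-contained sketch on synthetic data (all values, dimensions and bin settings are placeholders; real datasets come from `load`):

```python
# Hedged sketch of xas() on synthetic data shaped the way the routine
# expects: variables over ('trainId', 'sa3_pId') dims.
import numpy as np
import xarray as xr
from toolbox_scs.routines.XAS import xas

tid, pid = np.arange(100), np.arange(4)
shape = (tid.size, pid.size)
rng = np.random.default_rng(0)
nrun = xr.Dataset(
    {'SCS_SA3': (('trainId', 'sa3_pId'), rng.random(shape) + 0.5),
     'MCP3peaks': (('trainId', 'sa3_pId'), -(rng.random(shape) + 0.5)),
     'nrj': (('trainId', 'sa3_pId'),
             np.broadcast_to(np.linspace(700, 710, tid.size)[:, None],
                             shape).copy())},
    coords={'trainId': tid, 'sa3_pId': pid})

res = xas(nrun, bins=0.5, Itkey='MCP3peaks')
print(res['nrj'], res['muA'], res['counts'])
```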