diff --git a/xgm.py b/xgm.py
index f9d341ed6a362798e540432f5b8a6e9796c5904f..f60f8b4cb117011ce7823ed0c9ca4fda5b82ab8c 100644
--- a/xgm.py
+++ b/xgm.py
@@ -17,7 +17,7 @@ def cleanXGMdata(data, npulses=None, sase3First=True):
         The XGM "TD" data arrays have arbitrary size of 1000 and default value 1.0
         when there is no pulse. This function sorts the SASE 1 and SASE 3 pulses.
         For DAQ runs after April 2019, sase-resolved arrays can be used. For older runs,
-        the function selectSASEinXGM can be used to extract sase-resolved pulses.
+        the function selectSASEinXGM is used to extract sase-resolved pulses.
         Inputs:
             data: xarray Dataset containing XGM TD arrays.
             npulses: number of pulses, needed if pulse pattern not available.
@@ -29,43 +29,55 @@ def cleanXGMdata(data, npulses=None, sase3First=True):
     '''
     dropList = []
     mergeList = []
-    dedicated = False
+    load_sa1 = True
     if 'sase3' in data:
         if np.all(data['npulses_sase1'].where(data['npulses_sase3'] !=0,
                                               drop=True) == 0):
-            dedicated = True
             print('Dedicated trains, skip loading SASE 1 data.')
+            load_sa1 = False
+        npulses_sa1 = None
+    else:
+        print('Missing bunch pattern info!')
+        if npulses is None:
+            raise TypeError('npulses argument is required when bunch pattern ' +
+                             'info is missing.')
     #pulse-resolved signals from XGMs
     keys = ["XTD10_XGM", "XTD10_SA3", "XTD10_SA1", 
             "XTD10_XGM_sigma", "XTD10_SA3_sigma", "XTD10_SA1_sigma",
             "SCS_XGM", "SCS_SA3", "SCS_SA1",
             "SCS_XGM_sigma", "SCS_SA3_sigma", "SCS_SA1_sigma"]
     
-    if ("SCS_SA3" not in data and "SCS_XGM" in data):
-        #no SASE-resolved arrays available
-        sa3 = selectSASEinXGM(data, xgm='SCS_XGM', sase='sase3', npulses=npulses,
-               sase3First=sase3First).rename({'XGMbunchId':'sa3_pId'})
-        mergeList.append(sa3)
-        if not dedicated:
-            sa1 = selectSASEinXGM(data, xgm='SCS_XGM', sase='sase1',
-                                  npulses=npulses, sase3First=sase3First).rename(
-                                  {'XGMbunchId':'sa1_pId'})
-            mergeList.append(sa1)
-        dropList.append('SCS_XGM')
-        keys.remove('SCS_XGM')
-
-    if ("XTD10_SA3" not in data and "XTD10_XGM" in data):
-        #no SASE-resolved arrays available
-        sa3 = selectSASEinXGM(data, xgm='XTD10_XGM', sase='sase3', npulses=npulses,
-                   sase3First=sase3First).rename({'XGMbunchId':'sa3_pId'})
-        mergeList.append(sa3)
-        if not dedicated:
-            sa1 = selectSASEinXGM(data, xgm='XTD10_XGM', sase='sase1', 
-                                  npulses=npulses, sase3First=sase3First).rename(
-                                  {'XGMbunchId':'sa1_pId'})
-            mergeList.append(sa1)
-        dropList.append('XTD10_XGM')
-        keys.remove('XTD10_XGM')
+    for whichXgm in ['SCS', 'XTD10']:
+        load_sa1 = True if 'sase3' not in data else load_sa1
+        if (f"{whichXgm}_SA3" not in data and f"{whichXgm}_XGM" in data):
+            #no SASE-resolved arrays available
+            if not 'sase3' in data:
+                npulses_xgm = data[f'{whichXgm}_XGM'].where((data[f'{whichXgm}_XGM'] != 1.0) & (data[f'{whichXgm}_XGM'] != 0.0), drop=True).shape[1]
+                npulses_sa1 = npulses_xgm - npulses
+                if npulses_sa1==0:
+                    load_sa1 = False
+                if npulses_sa1 < 0:
+                    raise ValueError(f'npulses = {npulses} is larger than the total number'
+                                     +f' of pulses per train = {npulses_xgm}')
+            sa3 = selectSASEinXGM(data, xgm=f'{whichXgm}_XGM', sase='sase3', npulses=npulses,
+                   sase3First=sase3First).rename({'XGMbunchId':'sa3_pId'}).rename(f"{whichXgm}_SA3")
+            mergeList.append(sa3)
+            if f"{whichXgm}_XGM_sigma" in data:
+                sa3_sigma = selectSASEinXGM(data, xgm=f'{whichXgm}_XGM_sigma', sase='sase3', npulses=npulses,
+                       sase3First=sase3First).rename({'XGMbunchId':'sa3_pId'}).rename(f"{whichXgm}_SA3_sigma")
+                mergeList.append(sa3_sigma)
+                dropList.append(f'{whichXgm}_XGM_sigma')
+            if load_sa1:
+                sa1 = selectSASEinXGM(data, xgm=f'{whichXgm}_XGM', sase='sase1',
+                                      npulses=npulses_sa1, sase3First=sase3First).rename(
+                                      {'XGMbunchId':'sa1_pId'}).rename(f"{whichXgm}_SA1")
+                mergeList.append(sa1)
+                if f"{whichXgm}_XGM_sigma" in data:
+                    sa1_sigma = selectSASEinXGM(data, xgm=f'{whichXgm}_XGM_sigma', sase='sase1', npulses=npulses_sa1,
+                           sase3First=sase3First).rename({'XGMbunchId':'sa1_pId'}).rename(f"{whichXgm}_SA1_sigma")
+                    mergeList.append(sa1_sigma)
+            dropList.append(f'{whichXgm}_XGM')
+            keys.remove(f'{whichXgm}_XGM')
         
     for key in keys:
         if key not in data:
@@ -74,7 +86,7 @@ def cleanXGMdata(data, npulses=None, sase3First=True):
             sase = 'sa3_'
         elif "sa1" in key.lower():
             sase = 'sa1_'
-            if dedicated:
+            if not load_sa1:
                 dropList.append(key)
                 continue
         else:
@@ -103,7 +115,7 @@ def selectSASEinXGM(data, sase='sase3', xgm='SCS_XGM', sase3First=True, npulses=
         Inputs:
             data: xarray Dataset containing xgm data
             sase: key of sase to select: {'sase1', 'sase3'}
-            xgm: key of xgm to select: {'XTD10_XGM', 'SCS_XGM'}
+            xgm: key of xgm to select: {'XTD10_XGM[_sigma]', 'SCS_XGM[_sigma]'}
             sase3First: bool, optional. Used in case no bunch pattern was recorded
             npulses: int, optional. Required in case no bunch pattern was recorded.
             
@@ -115,24 +127,20 @@ def selectSASEinXGM(data, sase='sase3', xgm='SCS_XGM', sase3First=True, npulses=
     '''
     #1. case where bunch pattern is missing:
     if sase not in data:
-        print('Missing bunch pattern info!')
-        if npulses is None:
-            raise TypeError('npulses argument is required when bunch pattern ' +
-                             'info is missing.')
         print('Retrieving {} SASE {} pulses assuming that '.format(npulses, sase[4])
               +'SASE {} pulses come first.'.format('3' if sase3First else '1'))
         #in older version of DAQ, non-data numbers were filled with 0.0.
         xgmData = data[xgm].where(data[xgm]!=0.0, drop=True)
         xgmData = xgmData.fillna(0.0).where(xgmData!=1.0, drop=True)
         if (sase3First and sase=='sase3') or (not sase3First and sase=='sase1'):
-            return xgmData[:,:npulses]
+            return xgmData[:,:npulses].assign_coords(XGMbunchId=np.arange(npulses))
         else:
             if xr.ufuncs.isnan(xgmData).any():
                 raise Exception('The number of pulses changed during the run. '
                       'This is not supported yet.')
             else:
                 start=xgmData.shape[1]-npulses
-                return xgmData[:,start:start+npulses]
+                return xgmData[:,start:start+npulses].assign_coords(XGMbunchId=np.arange(npulses))
     
     #2. case where bunch pattern is provided
     #2.1 Merge sase1 and sase3 bunch patterns to get indices of all pulses
@@ -208,49 +216,89 @@ def saseContribution(data, sase='sase1', xgm='XTD10_XGM'):
     else:
         return 1 - contrib
 
-def calibrateXGMs(data, rollingWindow=200, plot=False):
+def calibrateXGMs(data, allPulses=False, plot=False, display=False):
     ''' Calibrate the fast (pulse-resolved) signals of the XTD10 and SCS XGM 
         (read in intensityTD property) to the respective slow ion signal 
-        (photocurrent read by Keithley, channel 'pulseEnergy.photonFlux.value').
-        If the sase-resolved signal (introduced in May 2019) are recorded, the
-        calibration is defined as the mean ratio between the photocurrent and
-        the low-pass slowTrain signal. Otherwise, calibrateXGMsFromAllPulses()
-        is called.
+        (photocurrent read by Keithley, key 'pulseEnergy.photonFlux.value').
+        If the sase-resolved averaged signals ("slowTrain", introduced in May 
+        2019) are recorded, the calibration is defined as the mean ratio 
+        between the photonFlux and the slowTrain signal. Otherwise, the 
+        averaged fast signals are computed using a rolling average.
 
         Inputs:
             data: xarray Dataset
-            rollingWindow: length of running average to calculate E_fast_avg
-            plot: boolean, plot the calibration output
+            allPulses: if True, uses "XTD10_XGM" and "SCS_XGM" arrays and 
+                computes the relative contributions of SASE1 and SASE3 to 
+                photonFlux. This should be more accurate in cases where the
+                number of SASE1 pulses is large and/or the SASE1 pulse 
+                intensity cannot be neglected.
+            plot: bool, plot the calibration output
+            display: bool, displays info if True
 
         Output:
-            factors: numpy ndarray of shape 1 x 2 containing 
-                     [XTD10 calibration factor, SCS calibration factor]
+            ndarray with [XTD10 calibration factor, SCS calibration factor]
     '''
-    XTD10_factor = np.nan
-    SCS_factor = np.nan
-    if "XTD10_slowTrain" in data or "SCS_slowTrain" in data:
-        if "XTD10_slowTrain" in data:
-            XTD10_factor = np.mean(data.XTD10_photonFlux/data.XTD10_slowTrain)
-            
+    if allPulses:
+        return calibrateXGMsFromAllPulses(data, plot)
+    hasSlowTrain=[True,True]
+    results = np.array([np.nan, np.nan], dtype=float)
+    slowTrainData = []
+    for i,whichXgm in enumerate(['XTD10', 'SCS']):
+        #1. Try to load fast data averages (in DAQ since May 2019)
+        if f'{whichXgm}_slowTrain' in data:
+            if display:
+                print(f'Using fast data averages (slowTrain) for {whichXgm}')
+            slowTrainData.append(data[f'{whichXgm}_slowTrain'])
         else:
-            print('no XTD10 XGM data. Skipping calibration for XTD10 XGM')
-        if "SCS_slowTrain" in data:
-            #XTD10_SA3_contrib = data.XTD10_slowTrain_SA3 * data.npulses_sase3 / (
-            #                data.XTD10_slowTrain * (data.npulses_sase3+data.npulses_sase1))
-            #SCS_SA3_SLOW = data.SCS_photonFlux*(data.npulses_sase3+
-            #                                    data.npulses_sase1)*XTD10_SA3_contrib/data.npulses_sase3
-            #SCS_factor = np.mean(SCS_SA3_SLOW/data.SCS_slowTrain_SA3)
-            SCS_factor = np.mean(data.SCS_photonFlux/data.SCS_slowTrain)
-        else:
-            print('no SCS XGM data. Skipping calibration for SCS XGM')
-            
-        #TODO: plot the results of calibration
-        return np.array([XTD10_factor, SCS_factor])
-    else:
-        return calibrateXGMsFromAllPulses(data, rollingWindow, plot)
-        
+            mnemo = tb.mnemonics[f'{whichXgm}_slowTrain']
+            if mnemo['key'] in data.attrs['run'].keys_for_source(mnemo['source']):
+                if display:
+                    print(f'Using fast data averages (slowTrain) for {whichXgm}')
+                slowTrainData.append(data.attrs['run'].get_array(mnemo['source'], mnemo['key']))
+        #2. Calculate fast data average from fast data
+            else:
+                if display:
+                    print(f'No averages of fast data (slowTrain) available for {whichXgm}.'+
+                      ' Attempting calibration from fast data.')
+                if f'{whichXgm}_SA3' in data:
+                    if display:
+                        print(f'Calculating slowTrain from SA3 for {whichXgm}')
+                    slowTrainData.append(data[f'{whichXgm}_SA3'].rolling(trainId=200
+                                                               ).mean().mean(axis=1))
+                elif f'{whichXgm}_XGM' in data:
+                    sa3 = selectSASEinXGM(data, xgm=f'{whichXgm}_XGM')
+                    slowTrainData.append(sa3.rolling(trainId=200).mean().mean(axis=1))
+                else:
+                    hasSlowTrain[i]=False
+        if hasSlowTrain[i]:
+            results[i] = np.mean(data[f'{whichXgm}_photonFlux']/slowTrainData[i])
+            if display:
+                print(f'Calibration factor {whichXgm} XGM: {results[i]}')
+    if plot:
+        plt.figure(figsize=(8,4))
+        plt.subplot(211)
+        plt.plot(data['XTD10_photonFlux'], label='XTD10 photon flux')
+        plt.plot(slowTrainData[0]*results[0], label='calibrated XTD10 fast signal')
+        plt.ylabel(r'Energy [$\mu$J]')
+        plt.legend(fontsize=8, loc='upper left')
+        plt.twinx()
+        plt.plot(slowTrainData[0], label='uncalibrated XTD10 fast signal', color='C4')
+        plt.ylabel(r'Uncalibrated energy')
+        plt.legend(fontsize=8, loc='upper right')
+        plt.subplot(212)
+        plt.plot(data['SCS_photonFlux'], label='SCS photon flux')
+        plt.plot(slowTrainData[1]*results[1], label='calibrated SCS fast signal')
+        plt.ylabel(r'Energy [$\mu$J]')
+        plt.xlabel('train Id')
+        plt.legend(fontsize=8, loc='upper left')
+        plt.twinx()
+        plt.plot(slowTrainData[1], label='uncalibrated SCS fast signal', color='C4')
+        plt.ylabel(r'Uncalibrated energy')
+        plt.legend(fontsize=8, loc='upper right')
+        plt.tight_layout()
+    return results
         
-def calibrateXGMsFromAllPulses(data, rollingWindow=200, plot=False):
+def calibrateXGMsFromAllPulses(data, plot=False):
     ''' Calibrate the fast (pulse-resolved) signals of the XTD10 and SCS XGM 
         (read in intensityTD property) to the respective slow ion signal 
         (photocurrent read by Keithley, channel 'pulseEnergy.photonFlux.value').
@@ -294,6 +342,7 @@ def calibrateXGMsFromAllPulses(data, rollingWindow=200, plot=False):
     stop = None
     npulses = data['npulses_sase3']
     ntrains = npulses.shape[0]
+    rollingWindow=200
     # First, in case of change in number of pulses, locate a region where
     # the number of pulses is maximum.
     if not np.all(npulses == npulses[0]):