diff --git a/xgm.py b/xgm.py
index 881b8bd7869723d9d5fbfdf182f85ffe7d2db194..a41e746b01fef8831a2c2e89c7da17f899e0d905 100644
--- a/xgm.py
+++ b/xgm.py
@@ -120,7 +120,7 @@ def cleanXGMdata(data, npulses=None, sase3First=True):
     ''' Cleans the XGM data arrays obtained from load() function.
         The XGM "TD" data arrays have arbitrary size of 1000 and default value 1.0
         when there is no pulse. This function sorts the SASE 1 and SASE 3 pulses.
-        For recent DAQ runs, sase-resolved arrays can be used. For older runs,
+        For DAQ runs after April 2019, sase-resolved arrays can be used. For older runs,
         the function selectSASEinXGM can be used to extract sase-resolved pulses.
         Inputs:
             data: xarray Dataset containing XGM TD arrays.
@@ -176,7 +176,7 @@ def cleanXGMdata(data, npulses=None, sase3First=True):
             dropList.append(key)
             mergeList.append(res)
     mergeList.append(data.drop(dropList))
-    subset = xr.merge(mergeList, join='inner')
+    subset = xr.merge(mergeList, join='outer')
     for k in data.attrs.keys():
         subset.attrs[k] = data.attrs[k]
     return subset
@@ -539,13 +539,15 @@ def mcpPeaks(data, intstart, intstop, bkgstart, bkgstop, mcp=1, t_offset=None, n
         t_offset = 440 * step
     else:
         t_offset = 1
-    results = xr.DataArray(np.empty((data.trainId.shape[0], npulses)), coords=data[keyraw].coords,
+    results = xr.DataArray(np.zeros((data.trainId.shape[0], npulses)), coords=data[keyraw].coords,
                            dims=['trainId', 'MCP{}fromRaw'.format(mcp)])
     for i in range(npulses):
         a = intstart + t_offset*i
         b = intstop + t_offset*i
         bkga = bkgstart + t_offset*i
         bkgb = bkgstop + t_offset*i
+        if b > data.dims['samplesId']:
+            break
         bg = np.outer(np.median(data[keyraw][:,bkga:bkgb], axis=1), np.ones(b-a))
         results[:,i] = np.trapz(data[keyraw][:,a:b] - bg, axis=1)
     return results
@@ -578,6 +580,7 @@ def getTIMapd(data, mcp=1, use_apd=True, intstart=None, intstop=None,
             tim: DataArray of shape trainId only for SASE3 pulses x N
                  with N=max(number of pulses per train)
     '''
+    #1. case where no bunch pattern is available:
     if 'sase3' not in data:
         print('Missing bunch pattern info!\n')
         if npulses is None:
@@ -586,91 +589,43 @@ def getTIMapd(data, mcp=1, use_apd=True, intstart=None, intstop=None,
         print('Retrieving {} SASE 3 pulses assuming that '.format(npulses) +
               'SASE 3 pulses come first.')
         if use_apd:
-            tim = data['MCP{}apd'.format(mcp)][:,:npulses:stride]
+            tim = data[f'MCP{mcp}apd'][:,:npulses:stride]
         else:
             tim = mcpPeaks(data, intstart, intstop, bkgstart, bkgstop, mcp=mcp,
                            t_offset=t_offset, npulses=npulses)
         return tim
 
-    #1. case where number of SASE 3 pulses were not changed during the run:
+    #2. If bunch pattern available, define a mask that corresponds to the SASE 3 pulses
+    initialDelay = data.attrs['run'].get_array(
+        'SCS_UTC1_ADQ/ADC/1', 'board1.apd.channel_0.initialDelay.value')[0].values
+    upperLimit = data.attrs['run'].get_array(
+        'SCS_UTC1_ADQ/ADC/1', 'board1.apd.channel_0.upperLimit.value')[0].values
+    period = upperLimit - initialDelay #period of the apd in number of digitizer samples
     sa3 = data['sase3'].where(data['sase3']>1, drop=True)
     sa3 -= sa3[0,0]
-    idx = np.unique(sa3, axis=0)
-    npulses_sa3 = data['npulses_sase3']
-    maxpulses = int(npulses_sa3.max().values)
-    if npulses is not None:
-        maxpulses = np.min([npulses, maxpulses])
-    if len(idx)==1:
-        if use_apd:
-            apd = data['MCP{}apd'.format(mcp)]
-            initialDelay = data.attrs['run'].get_array(
-                'SCS_UTC1_ADQ/ADC/1', 'board1.apd.channel_0.initialDelay.value')[0].values
-            upperLimit = data.attrs['run'].get_array(
-                'SCS_UTC1_ADQ/ADC/1', 'board1.apd.channel_0.upperLimit.value')[0].values
-            period = upperLimit - initialDelay #period of the apd in number of digitizer samples
-            idx /= int(period/440) #440 samples correspond to the separation between two pulses at 4.5 MHz
-            idx = idx[0].astype(int)
-            return apd.isel(apdId=idx)
-        else:
-            return mcpPeaks(data, intstart, intstop, bkgstart, bkgstop, mcp=mcp,
-                            t_offset=t_offset, npulses=maxpulses)
+    sa3 = sa3/int(period/440) #440 = samples between two pulses @4.5 MHz with ADQ412 digitizer
+    idxList, inv = np.unique(sa3, axis=0, return_inverse=True)
+    mask = xr.DataArray(np.zeros((data.dims['trainId'], data.dims['apdId']), dtype=bool),
+                        dims=['trainId', 'apdId'],
+                        coords={'trainId':data.trainId,
+                                'apdId':np.arange(data.dims['apdId'])},
+                        name='mask')
+    mask = mask.sel(trainId=sa3.trainId)
+    for i,idxApd in enumerate(idxList):
+        idxApd = idxApd[idxApd>=0].astype(int)
+        idxTid = inv==i
+        mask[idxTid, idxApd] = True
 
-    #2. case where the number of SASE 3 pulses varied during the run:
-    npulses_sa3 = data['npulses_sase3']
-    maxpulses = int(npulses_sa3.max().values)
-    if npulses is not None:
-        maxpulses = np.min([npulses, maxpulses])
-    step = 1
-    if maxpulses > 1:
-        #Calculate the number of non-lasing pulses between two lasing pulses (step)
-        step = sa3.where(data['npulses_sase3']>1, drop=True)[0,:2].values
-        step = int(step[1] - step[0])
+    #2.1 case where apd is used:
     if use_apd:
-        apd = data['MCP{}apd'.format(mcp)]
-        initialDelay = data.attrs['run'].get_array(
-            'SCS_UTC1_ADQ/ADC/1', 'board1.apd.channel_0.initialDelay.value')[0].values
-        upperLimit = data.attrs['run'].get_array(
-            'SCS_UTC1_ADQ/ADC/1', 'board1.apd.channel_0.upperLimit.value')[0].values
-        nsamples = upperLimit - initialDelay
-        npulses_per_apd = int(nsamples/440)
-        sa3 /= npulses_per_apd
+        return data[f'MCP{mcp}apd'].where(mask, drop=True)
+
+    #2.2 case where integration is performed on raw trace:
     else:
-        apd = mcpPeaks(data, intstart, intstop, bkgstart, bkgstop, mcp=mcp,
-                       t_offset=t_offset, npulses=npulses)
-        sa3 /= step
-    sa3 -= sa3[:,0]
-    sa3 = sa3.astype(int)
-    if np.all(npulses_sa3 == npulses_sa3[0]):
-        tim = apd[:, sa3[0].values[:maxpulses]]
-        return tim
-
-    stride = 1
-    if use_apd:
-        stride = np.max([stride,int(step/npulses_per_apd)])
-    diff = npulses_sa3.diff(dim='trainId')
-    #only keep trainIds where a change occured:
-    diff = diff.where(diff != 0, drop=True)
-    #get a list of indices where a change occured:
-    idx_change = np.argwhere(np.isin(npulses_sa3.trainId.values,
-                                     diff.trainId.values, assume_unique=True))[:,0]
-    #add index 0 to get the initial pulse number per train:
-    idx_change = np.insert(idx_change, 0, 0)
-    tim = None
-    for i,idx in enumerate(idx_change):
-        if npulses_sa3[idx]==0:
-            continue
-        if i==len(idx_change)-1:
-            l = None
-        else:
-            l = idx_change[i+1]
-        b = npulses_sa3[idx].values
-        temp = apd[idx:l,:maxpulses*stride:stride].copy()
-        temp[:,b:] = np.NaN
-        if tim is None:
-            tim = temp
-        else:
-            tim = xr.concat([tim, temp], dim='trainId')
-    return tim
+        peaks = mcpPeaks(data, intstart, intstop, bkgstart, bkgstop, mcp=mcp,
+                         t_offset=period, npulses=data.dims['apdId'])
+        mask = mask.rename({'apdId':f'MCP{mcp}fromRaw'})
+        return peaks.where(mask, drop=True)
 
 
 def calibrateTIM(data, rollingWindow=200, mcp=1, plot=False, use_apd=True, intstart=None,
@@ -966,7 +921,7 @@ def matchXgmTimPulseId(data, use_apd=True, intstart=None, intstop=None,
     ndata = cleanXGMdata(data, npulses, sase3First)
    for mcp in range(1,5):
         if 'MCP{}apd'.format(mcp) in data or 'MCP{}raw'.format(mcp) in data:
-            MCPapd = getTIMapd(ndata, mcp=mcp, use_apd=use_apd, intstart=intstart,
+            MCPapd = getTIMapd(data, mcp=mcp, use_apd=use_apd, intstart=intstart,
                                intstop=intstop,bkgstart=bkgstart, bkgstop=bkgstop,
                                t_offset=t_offset, npulses=npulses,
                                stride=stride).rename('MCP{}apd'.format(mcp))
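Note on the getTIMapd rewrite: instead of branching on whether the number of SASE 3 pulses changed during the run, the patch builds a boolean (trainId, apdId) mask from the bunch pattern and selects the APD (or raw-integrated) peaks with .where(mask, drop=True). The snippet below is a minimal, self-contained sketch of that masking step on synthetic data; the variable names follow the patch, but the pulse pattern, APD period, array sizes and the 'bunchId' dimension name are invented for illustration and do not come from a real run.

import numpy as np
import xarray as xr

# Synthetic stand-ins: 5 trains, 10 APD bins. In the patch, `period` comes from the
# ADQ412 board parameters (upperLimit - initialDelay) and `sa3` from the decoded
# bunch pattern; the values below are made up.
ntrains, napd = 5, 10
period = 880   # assumed APD period in digitizer samples (two 4.5 MHz slots of 440 samples)

# Relative SASE 3 pulse positions per train, in units of 440 samples; -1 marks "no pulse"
# (in the patch these entries are NaN/negative and are removed by the same >= 0 filter).
sa3 = xr.DataArray(np.array([[0, 2, 4, -1],
                             [0, 2, 4, -1],
                             [0, 2, 4,  6],
                             [0, 2, 4,  6],
                             [0, 2, 4, -1]]),
                   dims=['trainId', 'bunchId'],
                   coords={'trainId': np.arange(ntrains)})

# Convert pulse positions to APD bin indices, as the patch does with sa3/int(period/440).
sa3 = sa3 / int(period / 440)

# Group trains by identical pulse pattern and build a boolean (trainId, apdId) mask.
idxList, inv = np.unique(sa3, axis=0, return_inverse=True)
inv = np.ravel(inv)  # ensure 1-D; the inverse shape differs between NumPy versions
mask = xr.DataArray(np.zeros((ntrains, napd), dtype=bool),
                    dims=['trainId', 'apdId'],
                    coords={'trainId': np.arange(ntrains),
                            'apdId': np.arange(napd)},
                    name='mask')
for i, idxApd in enumerate(idxList):
    idxApd = idxApd[idxApd >= 0].astype(int)  # drop "no pulse" entries
    mask[inv == i, idxApd] = True             # outer indexing: trains of group i x their bins

# Keep only the APD bins that contain a SASE 3 pulse, mirroring
# data[f'MCP{mcp}apd'].where(mask, drop=True) in getTIMapd.
apd = xr.DataArray(np.random.rand(ntrains, napd),
                   dims=['trainId', 'apdId'],
                   coords={'trainId': np.arange(ntrains),
                           'apdId': np.arange(napd)})
print(apd.where(mask, drop=True))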