diff --git a/Load.py b/Load.py
index 4da21d762611f0c7e53fbf43cf1dd347e09c6536..e6f60662b301fb246a8cc635c00a79e89fe2a66c 100644
--- a/Load.py
+++ b/Load.py
@@ -38,6 +38,18 @@ mnemonics = {
     "npulses_sase1": {'source':'SCS_RR_UTC/MDL/BUNCH_DECODER',
                       'key':'sase1.nPulses.value',
                       'dim':None},
+
+    #Bunch Arrival Monitors
+    "BAM6": {'source':'SCS_ILH_LAS/DOOCS/BAM_414_B2:output',
+             'key':'data.lowChargeArrivalTime',
+             'dim':['BAMbunchId']},
+    "BAM7": {'source':'SCS_ILH_LAS/DOOCS/BAM_1932M_TL:output',
+             'key':'data.lowChargeArrivalTime',
+             'dim':['BAMbunchId']},
+    "BAM8": {'source':'SCS_ILH_LAS/DOOCS/BAM_1932S_TL:output',
+             'key':'data.lowChargeArrivalTime',
+             'dim':['BAMbunchId']},
+
     # SA3
     "nrj": {'source':'SA3_XTD10_MONO/MDL/PHOTON_ENERGY',
             'key':'actualEnergy.value',
@@ -437,8 +449,8 @@ def load(fields, runNB, proposalNB, subFolder='raw', display=False, validate=Fal
             else:
                 bp_table = run.get_array(bp_mnemo['source'],bp_mnemo['key'],
                                          extra_dims=bp_mnemo['dim'])
-            sase1, npulses_sase1 = extractBunchPattern(bp_table, 'sase1')
-            sase3, npulses_sase3 = extractBunchPattern(bp_table, 'sase3')
+            sase1, npulses_sase1, dummy = extractBunchPattern(bp_table, 'sase1')
+            sase3, npulses_sase3, dummy = extractBunchPattern(bp_table, 'sase3')
             keys += ["sase1", "npulses_sase1", "sase3", "npulses_sase3"]
             vals += [sase1, npulses_sase1, sase3, npulses_sase3]
         else:
diff --git a/bunch_pattern.py b/bunch_pattern.py
index 8e2b8557a8a02bf412a5f12f6a0dec48b707341b..42f34a61f8724762be1c3670d1287b74a64ac5d6 100644
--- a/bunch_pattern.py
+++ b/bunch_pattern.py
@@ -25,6 +25,7 @@ def extractBunchPattern(bp_table=None, key='sase3', runDir=None):
         bunchPattern: DataArray containing indices of the sase/laser pulses for
             each train
         npulses: DataArray containing the number of pulses for each train
+        matched: 2-D DataArray mask (trainId x 2700), True where 'key' has pulses
     '''
 
     keys=['sase1', 'sase2', 'sase3', 'scs_ppl']
@@ -74,7 +75,7 @@ def extractBunchPattern(bp_table=None, key='sase3', runDir=None):
     npulses = xr.DataArray(npulses, dims=['trainId'],
                            coords={'trainId':matched.trainId},
                            name=f'npulses_{key}')
-    return bunchPattern, npulses
+    return bunchPattern, npulses, matched
 
 
 def pulsePatternInfo(data, plot=False):
@@ -179,3 +180,38 @@ def repRate(data, sase='sase3'):
         return 0
     f = 1/((sase[0,1] - sase[0,0])*12e-3/54.1666667)
     return f
+
+def sortBAMdata(data, key='sase3'):
+    ''' Extracts beam arrival monitor data from the raw arrays 'BAM6', 'BAM7', etc...
+        according to the bunchPatternTable. The BAM arrays contain 7220 values, which
+        corresponds to the FLASH burst length of 800 us @ 9 MHz. The bunchPatternTable
+        only has 2700 values, corresponding to the XFEL burst length of 600 us @ 4.5 MHz.
+        Hence, we truncate the BAM arrays to 5400 values with a stride of 2 and match them
+        to the bunchPatternTable. If key is one of the SASEs, the dimension name given
+        to the BAM arrays is 'sa[sase number]_pId', to match other data (XGM, TIM...).
+        Inputs:
+            data: xarray Dataset containing BAM arrays
+            key: str, ['sase1', 'sase2', 'sase3', 'scs_ppl']
+
+        Output:
+            ndata: xarray Dataset with same keys as input data (but new bam arrays)
+    '''
+    a, b, mask = extractBunchPattern(key=key, runDir=data.attrs['run'])
+    mask = mask.rename({'pulse_slot':'BAMbunchId'})
+    ndata = data
+    dropList = []
+    mergeList = []
+    for k in data:
+        if 'BAM' in k:
+            dropList.append(k)
+            bam = data[k].isel(BAMbunchId=slice(0,5400,2))
+            bam = bam.where(mask, drop=True)
+            if 'sase' in key:
+                name = f'sa{key[4]}_pId'
+                bam = bam.rename({'BAMbunchId':name})
+            mergeList.append(bam)
+    mergeList.append(data.drop(dropList))
+    ndata = xr.merge(mergeList, join='inner')
+    for k in data.attrs.keys():
+        ndata.attrs[k] = data.attrs[k]
+    return ndata
\ No newline at end of file
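Usage sketch (not part of the patch): assuming load() returns an xarray Dataset whose
attrs carry the run under 'run', as sortBAMdata expects, the new function could be
applied as follows. The run and proposal numbers below are placeholders.

    from Load import load
    from bunch_pattern import sortBAMdata

    # Load a run that includes the new BAM mnemonics.
    data = load(['BAM6', 'BAM7', 'BAM8'], runNB=123, proposalNB=2212)

    # Re-index the BAM arrays onto the SASE 3 pulse pattern: 'BAMbunchId'
    # is replaced by 'sa3_pId' so the arrays line up with XGM/TIM data.
    data = sortBAMdata(data, key='sase3')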