diff --git a/DSSC.py b/DSSC.py
index 8a95d3a001c70b7aa61ca4221cae7a2cf5376fa7..90373df2b79ce684ffa25e2524ff4d0680956db7 100644
--- a/DSSC.py
+++ b/DSSC.py
@@ -316,6 +316,10 @@ class DSSC:
 
         print('processing', self.chunksize, 'trains per chunk')
 
+        # load scan variable
+        scan = xr.load_dataset(self.vds_scan, group='data')['scan_variable']
+        scan.name = 'scan'
+
         jobs = []
         for m in range(16):
             jobs.append(dict(
@@ -323,7 +327,7 @@ class DSSC:
                 fpt=self.fpt,
                 vdf_module=os.path.join(self.tempdir, f'dssc{m}_vds.h5'),
                 chunksize=self.chunksize,
-                vdf_scan=self.vds_scan,
+                scan=scan,
                 nbunches=self.nbunches,
                 run_nr=self.run_nr,
                 do_pulse_mean=do_pulse_mean
@@ -612,7 +616,7 @@ def process_one_module(job):
     module = job['module']
     fpt = job['fpt']
     data_vdf = job['vdf_module']
-    scan_vdf = job['vdf_scan']
+    scan = job['scan']
     chunksize = job['chunksize']
     nbunches = job['nbunches']
     do_pulse_mean = job['do_pulse_mean']
@@ -624,9 +628,6 @@ def process_one_module(job):
     n_trains = len(all_trainIds)
     chunk_start = np.arange(n_trains, step=chunksize, dtype=int)
 
-    # load scan variable
-    scan = xr.open_dataset(scan_vdf, group='data')['scan_variable']
-    scan.name = 'scan'
     len_scan = len(scan.groupby(scan))
 
     if do_pulse_mean: