diff --git a/src/toolbox_scs/detectors/digitizers.py b/src/toolbox_scs/detectors/digitizers.py
index 067a75b22c345571346e2a91af8a751ecb84410b..506335db77819fffa2cd1259e6fc46039b2045bb 100644
--- a/src/toolbox_scs/detectors/digitizers.py
+++ b/src/toolbox_scs/detectors/digitizers.py
@@ -13,12 +13,9 @@ import numpy as np
 import xarray as xr
 import matplotlib.pyplot as plt
 from scipy.signal import find_peaks
-from scipy.signal import fftconvolve
 
 from ..misc.bunch_pattern_external import is_pulse_at
-from ..util.exceptions import ToolBoxValueError
-from ..mnemonics_machinery import (mnemonics_to_process,
-                                   mnemonics_for_run)
+from ..mnemonics_machinery import mnemonics_for_run
 from extra_data import open_run
 from extra_data.read_machinery import find_proposal
 from extra.components import XrayPulses, OpticalLaserPulses
@@ -445,7 +442,6 @@ def find_peak_integration_parameters(run, mnemonic, raw_trace=None,
             raise ValueError(add_text + 'All keys of integParams argument '
                              f'{required_keys} are required.')
         params = integParams.copy()
-
         # extract pulse ids from the parameters (starting at 0)
         pulse_ids_params = None
         if hasattr(params['pulseStart'], '__len__'):
@@ -458,9 +454,12 @@ def find_peak_integration_parameters(run, mnemonic, raw_trace=None,
             pulse_ids_params = ((np.array(params['pulseStart'])
                                  - params['pulseStart'][0]) / pulse_period).astype(int)
         elif 'npulses' in params and 'period' in params:
-            pulse_ids_params = np.arange(0,
-                params['npulses'] * params['period'] / pulse_period,
-                params['period'] / pulse_period).astype(int)
+            if params['npulses'] == 1:
+                pulse_ids_params = np.array([0])
+            else:
+                pulse_ids_params = np.arange(0,
+                    params['npulses'] * params['period'] / pulse_period,
+                    params['period'] / pulse_period).astype(int)
 
     # Extract pulse_ids, period and npulses from bunch pattern
     pulse_ids_bp, npulses_bp, period_bp = None, None, 0
@@ -512,7 +511,7 @@ def find_peak_integration_parameters(run, mnemonic, raw_trace=None,
             params['npulses'] = npulses_bp
             params['period'] = period_bp
 
-        if regular == False:
+        if not regular:
            # Irregular pattern
            if hasattr(params['pulseStart'], '__len__'):
                start = params['pulseStart'][0]
@@ -590,8 +589,8 @@ def check_peak_params(proposal, runNB, mnemonic, raw_trace=None,
     if regular:
         add_text = ''
         if len(pulse_ids) > 1:
-            add_text = f's, {(pulse_ids[1]-pulse_ids[0]) * pulse_period}' +\
-                ' samples between two pulses'
+            add_text = f's, {(pulse_ids[1]-pulse_ids[0]) * pulse_period}'\
+                + ' samples between two pulses'
         print(f'Bunch pattern {bunchPattern}: {len(pulse_ids)} pulse'
               + add_text)
     else:
@@ -607,7 +606,7 @@ def check_peak_params(proposal, runNB, mnemonic, raw_trace=None,
            no_change = no_change & (v == params[k])
            if hasattr(no_change, '__len__'):
                no_change = no_change.all()
-        if no_change == False:
+        if not no_change:
            print('The provided parameters did not match the bunch '
                  'pattern and were adjusted.')
            title += ' (adjusted)'
@@ -625,7 +624,7 @@ def check_peak_params(proposal, runNB, mnemonic, raw_trace=None,
            raw_trace = get_dig_avg_trace(run, mnemonic)
        fig, ax = plotPeakIntegrationWindow(raw_trace, params,
                                            show_all=show_all)
-        fig.suptitle(f'p{proposal} r{runNB} '+ title, size=12)
+        fig.suptitle(f'p{proposal} r{runNB} ' + title, size=12)
 
     return params
 
@@ -667,12 +666,12 @@ def plotPeakIntegrationWindow(raw_trace, params, show_all=False):
         for j in range(min(2, len(starts))):
             if plot == 1:
                 j = -j
-            label='baseline' if j == 0 else ''
+            label = 'baseline' if j == 0 else ''
             ax[plot].axvline(baseStarts[i+j], ls='--', color='k')
             ax[plot].axvline(baseStops[i+j], ls='--', color='k')
             ax[plot].axvspan(baseStarts[i+j], baseStops[i+j],
                              alpha=0.5, color='grey', label=label)
-            label='peak' if j == 0 else ''
+            label = 'peak' if j == 0 else ''
             ax[plot].axvline(starts[i+j], ls='--', color='r')
             ax[plot].axvline(stops[i+j], ls='--', color='r')
             ax[plot].axvspan(starts[i+j], stops[i+j],
@@ -685,7 +684,7 @@ def plotPeakIntegrationWindow(raw_trace, params, show_all=False):
        xmin = np.max([0, baseStarts[i] - 200])
        xmax = np.min([stops[i] + 200, raw_trace.size])
        ax[plot].plot(np.arange(xmin, xmax),
-                      raw_trace[xmin:xmax], color='C0', label=title)
+                      raw_trace[xmin:xmax], color='C0', label='raw trace')
        ax[plot].legend(fontsize=8)
        ax[plot].set_xlim(xmin, xmax)
        ax[plot].set_xlabel('digitizer samples')
@@ -962,7 +961,7 @@ def load_all_processed_peaks(proposal, runNB, data='usr/processed_runs',
                                    join='inner')
        return xr.load_dataset(fname)
    else:
-        print(f'{fname} not found.')
+        log.warning(f'{fname} not found.')
        return merge_with
 
 
@@ -1007,13 +1006,15 @@ def check_processed_peak_params(proposal, runNB, mnemonic,
        if mnemonic.replace('raw', 'peaks') not in ds:
            log.warning(f'Mnemonic {mnemonic} not found in {fname}')
            return {}
-        params = {k.replace(f'{mnemonic}_', ''): ds.attrs[k] for\
+        params = {k.replace(f'{mnemonic}_', ''): ds.attrs[k] for
                  k in ds.attrs if f'{mnemonic}_' in k}
        if plot:
            title = 'Processed peak parameters'
-            fig, ax = plotPeakIntegrationWindow(ds[mnemonic.replace('peaks', 'avg')],
-                                   params, show_all=show_all)
-            fig.suptitle(f'p{proposal} r{runNB} '+ title, size=12)
+            fig, ax = plotPeakIntegrationWindow(
+                ds[mnemonic.replace('peaks', 'avg')],
+                params,
+                show_all=show_all)
+            fig.suptitle(f'p{proposal} r{runNB} ' + title, size=12)
        return params
    else:
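
Note on the functional change in find_peak_integration_parameters: the new npulses == 1 branch presumably avoids passing np.arange a zero (or undefined) step when only a single pulse is requested and 'period' is 0. The snippet below is a minimal standalone sketch of that branch logic, assuming nothing beyond NumPy; the helper name pulse_ids_from_params and the example numbers are illustrative only and are not part of the ToolBox API.

import numpy as np

def pulse_ids_from_params(npulses, period, pulse_period):
    """Pulse indices (starting at 0) in units of the digitizer pulse period.

    Sketch of the branch added in this diff: with a single pulse, 'period'
    may be 0 or meaningless, which would give np.arange a zero step, so
    that case is short-circuited to [0].
    """
    if npulses == 1:
        return np.array([0])
    return np.arange(0, npulses * period / pulse_period,
                     period / pulse_period).astype(int)

# Illustrative values only: 3 pulses spaced by 96 samples, pulse period 24
print(pulse_ids_from_params(3, 96, 24))  # [0 4 8]
print(pulse_ids_from_params(1, 0, 24))   # [0]  (zero step avoided)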