
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source: 7. Showing 311 additions and 216 deletions.
Installation instructions (RST):

@@ -15,10 +15,10 @@ If the toolbox has been installed in your home directory previously, everything
 .. code:: bash

-    pip install --user .
+    pip install --user ".[maxwell]"

 Alternatively, use the -e flag for installation to install the package in development mode.

 .. code:: bash

-    pip install --user -e .
+    pip install --user -e ".[maxwell]"
setup.py:

@@ -8,6 +8,13 @@ with open('VERSION') as f:
     _version = _version.strip("\n")

+basic_analysis_reqs = ['numpy', 'scipy',]  # and is readily available in Karabo
+advanced_analysis_reqs = [
+    'pandas', 'imageio', 'xarray>=0.13.0', 'h5py', 'h5netcdf',]
+interactive_reqs = ['ipykernel', 'matplotlib', 'tqdm',]
+maxwell_reqs = ['joblib', 'extra_data', 'euxfel_bunch_pattern>=0.6']
+
 setup(name='toolbox_scs',
       version=_version,
       description="A collection of code for the SCS beamline",
@@ -20,9 +27,11 @@ setup(name='toolbox_scs',
       package_dir={'': 'src'},
       packages=find_packages('src'),
       package_data={},
-      install_requires=[
-          'xarray>=0.13.0', 'numpy', 'matplotlib',
-          'pandas', 'scipy', 'h5py', 'h5netcdf',
-          'extra_data', 'euxfel_bunch_pattern>=0.6',
-      ],
+      install_requires=basic_analysis_reqs,
+      extras_require={
+          'advanced': advanced_analysis_reqs,
+          'interactive': interactive_reqs,
+          'maxwell': advanced_analysis_reqs + interactive_reqs + maxwell_reqs,
+          'test': ['pytest']
+      }
       )
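With this split, a bare `pip install .` pulls in only the basic analysis requirements, and the heavier dependency groups are opted into via extras, matching the updated install instructions above. As a quick sanity check (a sketch, assuming Python >= 3.8 and an installed toolbox_scs), `importlib.metadata` lists the declared groups:

.. code:: python

    # List the declared dependencies of the installed package; extras appear
    # with environment markers such as: joblib; extra == "maxwell"
    from importlib.metadata import requires

    for req in requires('toolbox_scs') or []:
        print(req)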
src/toolbox_scs/__init__.py:

-from .load import (load, concatenateRuns, get_array, run_by_path)
-from .constants import mnemonics
-from .mnemonics_machinery import mnemonics_for_run
-
-__all__ = (
-    # functions
-    "load",
-    "concatenateRuns",
-    "get_array",
-    "run_by_path",
-    "mnemonics_for_run",
-    # Classes
-    # Variables
-    "mnemonics",
-)
-
-# ------------------------------------------------------------------------------
-# Clean namespace
-#   clean_ns is a collection of undesired items in the namespace
-# ------------------------------------------------------------------------------
-clean_ns = [
-    # filenames
-    'constants',
-    'mnemonics_machinery'
-    # folders
-    'misc',
-    'util',
-    'detectors',
-    'routines',
-]
-
-for name in dir():
-    if name in clean_ns:
-        del globals()[name]
-del globals()['clean_ns']
-del globals()['name']
+from .constants import *
+from .detectors import *
+# Module name is the same as a child function, we use alias to avoid conflict
+import toolbox_scs.load as load_module
+from .load import *
+from .misc import *
+from .mnemonics_machinery import *
+from .routines import *
+
+__all__ = (
+    # top-level modules
+    constants.__all__
+    + load_module.__all__
+    + mnemonics_machinery.__all__
+    # submodules
+    + detectors.__all__
+    + misc.__all__
+    + routines.__all__
+)
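The rewritten `__init__` drops the manual namespace-scrubbing loop in favour of a convention: each submodule curates its own `__all__`, and the package concatenates them. A minimal sketch of the pattern, with illustrative names that are not part of the ToolBox:

.. code:: python

    # mypackage/submodule.py
    __all__ = ['public_func']     # the only name exported by 'import *'

    def public_func():
        return 42

    def _helper():                # underscore names are never star-imported
        return -1

    # mypackage/__init__.py
    from .submodule import *      # importing also binds 'submodule' on the package
    __all__ = tuple(submodule.__all__)

With this, `from mypackage import *` exposes `public_func` only, so there is nothing left over for a `clean_ns` loop to delete by hand.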
src/toolbox_scs/constants.py:

+__all__ = [
+    'mnemonics'
+]
+
 mnemonics = {
     # Machine
     "sase3": ({'source': 'SCS_RR_UTC/MDL/BUNCH_DECODER',
@@ -142,6 +147,17 @@ mnemonics = {
                'key': 'interlock.AActionState.value',
                'dim': None},),
+    # XTD10 MCP (after GATT)
+    'XTD10_MCP3raw': ({'source': 'SA3_XTD10_MCP/ADC/1:channel_3.output',
+                       'key': 'data.rawData',
+                       'dim': ['XTD10_MCPsampleId']},),
+    'XTD10_MCP5raw': ({'source': 'SA3_XTD10_MCP/ADC/1:channel_5.output',
+                       'key': 'data.rawData',
+                       'dim': ['XTD10_MCPsampleId']},),
+    'XTD10_MCP9raw': ({'source': 'SA3_XTD10_MCP/ADC/1:channel_9.output',
+                       'key': 'data.rawData',
+                       'dim': ['XTD10_MCPsampleId']},),
     # DPS imagers
     "DPS1CAM2": ({'source': 'SCS_BLU_DPS-1/CAM/IMAGER2CAMERA:daqOutput',
                   'key': 'data.image.pixels',
......
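Each new mnemonic becomes usable wherever the ToolBox accepts mnemonic names. A hypothetical example with the `get_array` helper (the run path is a placeholder, and `run_by_path` is assumed to take a run directory, as its name suggests):

.. code:: python

    import toolbox_scs as tb

    run = tb.run_by_path('/path/to/raw/r0001')  # placeholder path
    mcp3 = tb.get_array(run, 'XTD10_MCP3raw')   # raw trace, expected dims:
                                                # trainId x XTD10_MCPsampleId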
src/toolbox_scs/detectors/__init__.py:

-from .xgm import (
-    get_xgm, calibrate_xgm)
-from .digitizers import (
-    get_peaks, get_tim_peaks, get_laser_peaks, get_digitizer_peaks,
-    check_peak_params)
-from .bam_detectors import get_bam
-from .pes import get_pes_tof, get_pes_params
-from .dssc_data import (
-    save_xarray, load_xarray, get_data_formatted, save_attributes_h5)
-from .dssc_misc import (
-    load_dssc_info, create_dssc_bins, quickmask_DSSC_ASIC,
-    get_xgm_formatted, load_mask)
-from .dssc_processing import (
-    process_dssc_data)
-from .dssc import (
-    DSSCBinner, DSSCFormatter)
-from .azimuthal_integrator import (
-    AzimuthalIntegrator, AzimuthalIntegratorDSSC)
+from .azimuthal_integrator import *
+from .bam_detectors import *
+from .digitizers import *
+from .dssc import *
+from .dssc_data import *
+from .dssc_misc import *
+from .dssc_processing import *
+from .pes import *
+from .xgm import *

 __all__ = (
-    # Functions
-    "get_xgm",
-    "calibrate_xgm",
-    "get_peaks",
-    "get_tim_peaks",
-    "get_laser_peaks",
-    "get_digitizer_peaks",
-    "check_peak_params",
-    "get_bam",
-    "get_pes_tof",
-    "get_pes_params",
-    "save_xarray",
-    "load_xarray",
-    "get_data_formatted",
-    "save_attributes_h5",
-    "load_dssc_info",
-    "create_dssc_bins",
-    "quickmask_DSSC_ASIC",
-    "get_xgm_formatted",
-    "load_mask",
-    "calc_xgm_frame_indices",
-    "process_dssc_data",
-    # Classes
-    "DSSCBinner",
-    "DSSCFormatter",
-    "AzimuthalIntegrator",
-    "AzimuthalIntegratorDSSC",
-    # Variables
+    azimuthal_integrator.__all__
+    + bam_detectors.__all__
+    + digitizers.__all__
+    + dssc.__all__
+    + dssc_data.__all__
+    + dssc_misc.__all__
+    + dssc_processing.__all__
+    + pes.__all__
+    + xgm.__all__
 )
-
-# -----------------------------------------------------------------------------
-# Clean namespace
-#   -> certain filenames we dont need in the namespace. Especially not those
-#      that are marked as private by using an underscore (_<filename>.py).
-# -----------------------------------------------------------------------------
-clean_ns = [
-    # filenames
-    'DSSC_bkp',
-    'DSSC1module',
-    'dssc',
-    'dssc_routines',
-    'dssc_processing',
-    'dssc_data',
-    'dssc_misc',
-    'dssc_plot',
-    'azimuthal_integrator',
-    'FastCCD',
-    'xgm',
-    'digitizers',
-    'bam_detectors',
-    'pes'
-]
-
-for name in dir():
-    if name in clean_ns:
-        del globals()[name]
-del globals()['clean_ns']
-del globals()['name']
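Although the detectors package now uses wildcard imports, the public surface is unchanged: everything previously listed by hand is still re-exported through the submodules' `__all__` lists, so existing user code keeps working, e.g.:

.. code:: python

    # These imports resolve exactly as before the refactoring.
    from toolbox_scs.detectors import get_xgm, get_bam, DSSCBinner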
src/toolbox_scs/detectors/azimuthal_integrator.py:

 import logging
 import numpy as np

+__all__ = [
+    'AzimuthalIntegrator',
+    'AzimuthalIntegratorDSSC'
+]
+
 log = logging.getLogger(__name__)
......
src/toolbox_scs/detectors/bam_detectors.py:

@@ -15,6 +15,9 @@ from ..misc.bunch_pattern_external import is_pulse_at
 from ..mnemonics_machinery import (mnemonics_to_process,
                                    mnemonics_for_run)

+__all__ = [
+    'get_bam',
+]
+
 log = logging.getLogger(__name__)
......
src/toolbox_scs/detectors/digitizers.py:

@@ -18,6 +18,14 @@ from ..util.exceptions import ToolBoxValueError
 from ..mnemonics_machinery import (mnemonics_to_process,
                                    mnemonics_for_run)

+__all__ = [
+    'check_peak_params',
+    'get_digitizer_peaks',
+    'get_laser_peaks',
+    'get_peaks',
+    'get_tim_peaks',
+]
+
 log = logging.getLogger(__name__)
@@ -271,6 +279,7 @@ def get_peaks(run,
     # 2. Use raw data from digitizer
     # minimum pulse period @ 4.5MHz, according to digitizer type
+    digitizer = digitizer_type(source=source)
     min_distance = 1
     if digitizer == 'FastADC':
         min_distance = 24
@@ -348,8 +357,7 @@ def get_peaks(run,
     return peaks.assign_coords({extra_dim: pid})


-def channel_peak_params(run, source, key=None, digitizer=None,
-                        channel=None, board=None):
+def channel_peak_params(run, source, key=None, channel=None, board=None):
     """
     Extract peak-integration parameters used by a channel of the digitizer.

@@ -364,13 +372,12 @@ def channel_peak_params(run, source, key=None, channel=None, board=None):
     key: str
         optional, used in combination with source (if source is not a ToolBox
         mnemonic) instead of digitizer, channel and board.
-    digitizer: {"FastADC", "ADQ412"} str
-        Type of digitizer. If None, inferred from the source mnemonic.
     channel: int or str
         The digitizer channel for which to retrieve the parameters. If None,
-        inferred from the source mnemonic.
+        inferred from the source mnemonic or source + key arguments.
     board: int
-        Board of the ADQ412. If None, inferred from the source mnemonic.
+        Board of the ADQ412. If None, inferred from the source mnemonic or
+        source + key arguments.

     Returns
     -------
@@ -381,58 +388,116 @@ def channel_peak_params(run, source, key=None, channel=None, board=None):
         m = run_mnemonics[source]
         source = m['source']
         key = m['key']
-    if key is not None:
-        if 'network' in source:
-            digitizer = 'ADQ412'
-            ch_to_int = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
-            k = key.split('.')[1].split('_')
-            channel = ch_to_int[k[2]]
-            board = k[1]
-        else:
-            digitizer = 'FastADC'
-            channel = int(source.split(':')[1].split('.')[0].split('_')[1])
-    if digitizer is None:
-        raise ValueError('digitizer argument is missing.')
-    if channel is None:
-        raise ValueError('channel argument is missing.')
-    if isinstance(channel, str):
-        ch_to_int = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
-        channel = ch_to_int[channel]
-    if board is None and digitizer == 'ADQ412':
-        raise ValueError('board argument is missing.')
-    keys = None
-    if digitizer == 'ADQ412':
-        baseKey = f'board{board}.apd.channel_{channel}.'
-        keys = ['baseStart', 'baseStop', 'pulseStart',
-                'pulseStop', 'initialDelay', 'upperLimit',
-                'enable']
-        keys = {k: baseKey + k + '.value' for k in keys}
-        keys['npulses'] = f'board{board}.apd.numberOfPulses.value'
-    if digitizer == 'FastADC':
-        if ":" in source:
-            baseKey = source.split(':')[1].split('.')[0]+'.'
-        else:
-            baseKey = f'channel_{channel}.'
-        keys = ['baseStart', 'baseStop', 'initialDelay',
-                'peakSamples', 'numPulses', 'pulsePeriod',
-                'enablePeakComputation']
-        keys = {k: baseKey + k + '.value' for k in keys}
-    if ":" in source:
-        source = source.split(':')[0]
-    tid, data = run.select(source).train_from_index(0)
-    result = [data[source][k] for k in keys.values()]
-    result = dict(zip(keys.keys(), result))
-    if digitizer == 'ADQ412':
-        result['period'] = result.pop('upperLimit') - \
-                           result.pop('initialDelay')
-    if digitizer == 'FastADC':
-        result['period'] = result.pop('pulsePeriod')
-        result['npulses'] = result.pop('numPulses')
-        result['pulseStart'] = result['initialDelay']
-        result['pulseStop'] = result.pop('initialDelay') + \
-                              result.pop('peakSamples')
-        result['enable'] = result.pop('enablePeakComputation')
+    if 'network' in source:
+        return adq412_channel_peak_params(run, source, key, channel, board)
+    else:
+        return fastADC_channel_peak_params(run, source, channel)
+
+
+def fastADC_channel_peak_params(run, source, channel=None):
+    """
+    Extract peak-integration parameters used by a channel of the Fast ADC.
+
+    Parameters
+    ----------
+    run: extra_data.DataCollection
+        DataCollection containing the digitizer data.
+    source: str
+        Name of Fast ADC source, e.g. 'SCS_UTC1_MCP/ADC/1:channel_5.output'.
+    channel: int
+        The Fast ADC channel for which to retrieve the parameters. If None,
+        inferred from the source.
+
+    Returns
+    -------
+    dict with peak integration parameters.
+    """
+    fastADC_keys = {
+        'baseStart': ('baselineSettings.baseStart.value',
+                      'baseStart.value'),
+        'baseStop': ('baseStop.value',),
+        'baseLength': ('baselineSettings.length.value',),
+        'enable': ('enablePeakComputation.value',),
+        'pulseStart': ('initialDelay.value',),
+        'pulseLength': ('peakSamples.value',),
+        'npulses': ('numPulses.value',),
+        'period': ('pulsePeriod.value',)
+    }
+    if channel is None:
+        channel = int(source.split(':')[1].split('.')[0].split('_')[1])
+    baseName = f'channel_{channel}.'
+    source = source.split(':')[0]
+    data = run.select(source).train_from_index(0)[1][source]
+    result = {}
+    for mnemo, versions in fastADC_keys.items():
+        for v in versions:
+            key = baseName + v
+            if key in data:
+                result[mnemo] = data[key]
+    if 'baseLength' in result:
+        result['baseStop'] = result['baseStart'] + \
+            result.pop('baseLength')
+    if 'pulseLength' in result:
+        result['pulseStop'] = result['pulseStart'] + \
+            result.pop('pulseLength')
+    return result
+
+
+def adq412_channel_peak_params(run, source, key=None,
+                               channel=None, board=None):
+    """
+    Extract peak-integration parameters used by a channel of the ADQ412.
+
+    Parameters
+    ----------
+    run: extra_data.DataCollection
+        DataCollection containing the digitizer data.
+    source: str
+        Name of ADQ412 source, e.g. 'SCS_UTC1_ADQ/ADC/1:network'.
+    key: str
+        optional, e.g. 'digitizers.channel_1_D.apd.pulseIntegral', used in
+        combination with source instead of channel and board.
+    channel: int or str
+        The ADQ412 channel for which to retrieve the parameters. If None,
+        inferred from the source mnemonic or source + key arguments.
+    board: int
+        Board of the ADQ412. If None, inferred from the source mnemonic or
+        source + key arguments.
+
+    Returns
+    -------
+    dict with peak integration parameters.
+    """
+    if key is None:
+        if channel is None or board is None:
+            raise ValueError('key or channel + board arguments are '
+                             'missing.')
+    else:
+        k = key.split('.')[1].split('_')
+        ch_to_int = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
+        channel = ch_to_int[k[2]]
+        board = k[1]
+    baseName = f'board{board}.apd.channel_{channel}.'
+    source = source.split(':')[0]
+    adq412_keys = {
+        'baseStart': (baseName + 'baseStart.value',),
+        'baseStop': (baseName + 'baseStop.value',),
+        'enable': (baseName + 'enable.value',),
+        'pulseStart': (baseName + 'pulseStart.value',),
+        'pulseStop': (baseName + 'pulseStop.value',),
+        'initialDelay': (baseName + 'initialDelay.value',),
+        'upperLimit': (baseName + 'upperLimit.value',),
+        'npulses': (f'board{board}.apd.numberOfPulses.value',)
+    }
+    data = run.select(source).train_from_index(0)[1][source]
+    result = {}
+    for mnemo, versions in adq412_keys.items():
+        for key in versions:
+            if key in data:
+                result[mnemo] = data[key]
+    result['period'] = result.pop('upperLimit') - \
+        result.pop('initialDelay')
     return result
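`channel_peak_params` is now a thin dispatcher, and the two helpers can also be called directly. A hypothetical call for each digitizer type, assuming `run` is an `extra_data.DataCollection` that actually contains these sources (the source and key strings are the docstring examples):

.. code:: python

    # Fast ADC: the channel number is parsed from the source when not given.
    fast_params = fastADC_channel_peak_params(
        run, 'SCS_UTC1_MCP/ADC/1:channel_5.output')

    # ADQ412: channel and board are parsed from the key when not given.
    adq_params = adq412_channel_peak_params(
        run, 'SCS_UTC1_ADQ/ADC/1:network',
        key='digitizers.channel_1_D.apd.pulseIntegral')

    # Both return dicts with keys such as 'pulseStart', 'pulseStop',
    # 'npulses' and 'period', depending on what the device recorded.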
@@ -523,7 +588,8 @@ def get_peak_params(run, mnemonic, raw_trace=None, ntrains=200):
         title = 'Digitizer peak params'
     else:
         mnemo_raw = mnemonic
-        min_distance = 24 if "FastADC" in mnemonic else 440
+        digitizer = digitizer_type(mnemonic, run_mnemonics)
+        min_distance = 24 if digitizer == "FastADC" else 440
         title = 'Auto-find peak params'
     if raw_trace is None:
         sel = run.select_trains(np.s_[:ntrains])
@@ -589,7 +655,8 @@ def check_peak_params(run, mnemonic, raw_trace=None, ntrains=200, params=None,
         log.warning('The digitizer did not record peak-integrated data.')
     if not plot:
         return params
-    min_distance = 24 if "FastADC" in mnemonic else 440
+    digitizer = digitizer_type(mnemonic, run_mnemonics)
+    min_distance = 24 if digitizer == "FastADC" else 440
     if 'bunchPatternTable' in run_mnemonics and bunchPattern != 'None':
         sel = run.select_trains(np.s_[:ntrains])
         bp_params = {}
@@ -689,6 +756,24 @@ def plotPeakIntegrationWindow(raw_trace, params, bp_params, show_all=False):
     return fig, ax


+def digitizer_type(mnemonic=None, mnemo_dict=None, source=None):
+    if mnemonic is not None:
+        source = mnemo_dict[mnemonic]['source']
+    if ':channel' in source:
+        return 'FastADC'
+    if ':network' in source:
+        return 'ADQ412'
+    dic = {'XTD10_MCP': 'FastADC',
+           'FastADC': 'FastADC',
+           'PES': 'ADQ412',
+           'MCP': 'ADQ412'}
+    for k, v in dic.items():
+        if k in mnemonic:
+            return v
+    log.warning(f'Could not find digitizer type from mnemonic {mnemonic}.')
+    return 'ADQ412'
+
+
 def get_tim_peaks(run, mnemonics=None, merge_with=None,
                   bunchPattern='sase3', integParams=None,
                   keepAllSase=False):
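The new helper infers the type from the source name first and only falls back to mnemonic heuristics when neither marker is present. For instance (assuming `run_mnemonics` is the mapping returned by `mnemonics_for_run(run)`):

.. code:: python

    digitizer_type(source='SCS_UTC1_MCP/ADC/1:channel_5.output')  # 'FastADC'
    digitizer_type(source='SCS_UTC1_ADQ/ADC/1:network')           # 'ADQ412'
    # With a mnemonic, the source is first looked up in the mnemonics dict:
    digitizer_type('XTD10_MCP3raw', run_mnemonics)                # 'FastADC'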
@@ -837,6 +922,9 @@ def get_digitizer_peaks(run, mnemonics, digitizer, merge_with=None,
     elif 'bunchPatternTable' in run_mnemonics:
         bpt = run.get_array(*run_mnemonics['bunchPatternTable'].values())
         log.debug('Loaded bpt from DataCollection.')
+    elif 'bunchPatternTable_SA3' in run_mnemonics:
+        bpt = run.get_array(*run_mnemonics['bunchPatternTable_SA3'].values())
+        log.debug('Loaded bpt from DataCollection.')
     else:
         bpt = None
......
src/toolbox_scs/detectors/dssc.py:

@@ -29,7 +29,10 @@ from .dssc_misc import (
 from .dssc_processing import (
     process_dssc_data, create_empty_dataset)

-__all__ = ["DSSCBinner", "DSSCFormatter"]
+__all__ = [
+    "DSSCBinner",
+    "DSSCFormatter"]
+
 log = logging.getLogger(__name__)
......
src/toolbox_scs/detectors/dssc_data.py:

@@ -6,6 +6,13 @@ import xarray as xr
 from ..util.exceptions import ToolBoxFileError

+__all__ = [
+    'get_data_formatted',
+    'load_xarray',
+    'save_attributes_h5',
+    'save_xarray',
+]
+
 log = logging.getLogger(__name__)
......
src/toolbox_scs/detectors/dssc_misc.py:

@@ -14,6 +14,14 @@ import extra_data as ed
 from .xgm import get_xgm
 from .digitizers import get_tim_peaks

+__all__ = [
+    'create_dssc_bins',
+    'get_xgm_formatted',
+    'load_dssc_info',
+    'load_mask',
+    'quickmask_DSSC_ASIC',
+]
+
 log = logging.getLogger(__name__)
......
src/toolbox_scs/detectors/dssc_processing.py:

@@ -15,6 +15,10 @@ import extra_data as ed
 from ..mnemonics_machinery import mnemonics_for_run
 from .dssc_data import save_xarray

+__all__ = [
+    'process_dssc_data'
+]
+
 log = logging.getLogger(__name__)
......
FastCCD module (toolbox_scs/detectors):

 from joblib import Parallel, delayed, parallel_backend
 from time import strftime
-import tempfile
 import shutil
 from tqdm.auto import tqdm
 import os
@@ -17,11 +16,11 @@ import h5py
 from glob import glob
 from imageio import imread

-import ToolBox as tb
 from ..constants import mnemonics as _mnemonics
 from .azimuthal_integrator import AzimuthalIntegrator
 from ..misc.laser_utils import positionToDelay


 class FastCCD:
     def __init__(self, proposal, distance=1, raw=False):
......
src/toolbox_scs/detectors/pes.py:

@@ -16,6 +16,12 @@ from ..mnemonics_machinery import (mnemonics_to_process,
                                    mnemonics_for_run)

+__all__ = [
+    'get_pes_params',
+    'get_pes_tof',
+]
+
 log = logging.getLogger(__name__)
......
src/toolbox_scs/detectors/xgm.py:

@@ -16,6 +16,11 @@ from ..misc.bunch_pattern_external import is_sase_1, is_sase_3
 from ..mnemonics_machinery import (mnemonics_to_process,
                                    mnemonics_for_run)

+__all__ = [
+    'calibrate_xgm',
+    'get_xgm',
+]
+
 log = logging.getLogger(__name__)
......
src/toolbox_scs/load.py:

@@ -21,6 +21,13 @@ from .mnemonics_machinery import mnemonics_for_run
 from .util.exceptions import ToolBoxValueError
 import toolbox_scs.detectors as tbdet

+__all__ = [
+    'concatenateRuns',
+    'get_array',
+    'load',
+    'run_by_path',
+]
+
 log = logging.getLogger(__name__)

@@ -123,20 +130,13 @@ def load(proposalNB=None, runNB=None,
         run.info()

     data_arrays = []
     run_mnemonics = mnemonics_for_run(run)
     # load pulse pattern info
-    if 'bunchPatternTable' in run_mnemonics:
-        bpt = run.get_array(*run_mnemonics['bunchPatternTable'].values(),
-                            name='bunchPatternTable')
-        data_arrays.append(bpt)
-    elif 'bunchPatternTable_SA3' in run_mnemonics:
-        log.info('Did not find SCS bunch pattern table but found the SA3 one.')
-        bpt = run.get_array(*run_mnemonics['bunchPatternTable_SA3'].values(),
-                            name='bunchPatternTable')
-        data_arrays.append(bpt)
-    else:
+    bpt = load_bpt(run, run_mnemonics=run_mnemonics)
+    if bpt is None:
         log.warning('Bunch pattern table not found in run. Skipping!')
+    else:
+        data_arrays.append(bpt)

     for f in fields:
         if type(f) == dict:
@@ -322,3 +322,20 @@ def get_array(run, mnemonic_key=None, stepsize=None):
             raise

     return data
+
+
+def load_bpt(run, merge_with=None, run_mnemonics=None):
+    if run_mnemonics is None:
+        run_mnemonics = mnemonics_for_run(run)
+    for key in ['bunchPatternTable', 'bunchPatternTable_SA3']:
+        if bool(merge_with) and key in merge_with:
+            log.debug(f'Using {key} from merge_with dataset.')
+            return merge_with[key]
+        if key in run_mnemonics:
+            bpt = run.get_array(*run_mnemonics[key].values(),
+                                name='bunchPatternTable')
+            log.debug(f'Loaded {key} from DataCollection.')
+            return bpt
+    log.debug('Could not find bunch pattern table.')
+    return None
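`load` now delegates the lookup to `load_bpt`, which checks for the SCS table first, then the SA3 fallback, and can reuse a table from an already-merged dataset via `merge_with`. A hypothetical standalone use:

.. code:: python

    bpt = load_bpt(run)  # DataArray named 'bunchPatternTable', or None
    if bpt is None:
        print('no bunch pattern table recorded for this run')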
src/toolbox_scs/misc/__init__.py:

-from .bunch_pattern import (extractBunchPattern, pulsePatternInfo,
-                            repRate)
-from .bunch_pattern_external import (is_sase_3, is_sase_1,
-                                     is_ppl, is_pulse_at)
-from .laser_utils import positionToDelay, degToRelPower
+from .bunch_pattern import *
+from .bunch_pattern_external import *
+from .laser_utils import *

 __all__ = (
-    # Functions
-    "extractBunchPattern",
-    "pulsePatternInfo",
-    "repRate",
-    "sortBAMdata",
-    "is_sase_3",
-    "is_sase_1",
-    "is_ppl",
-    "is_pulse_at",
-    "get_index_ppl",
-    "get_index_sase1",
-    "get_index_sase3",
-    "positionToDelay",
-    "degToRelPower",
-    # Classes
-    # Variables
+    bunch_pattern.__all__
+    + bunch_pattern_external.__all__
+    + laser_utils.__all__
 )
src/toolbox_scs/misc/bunch_pattern.py:

@@ -16,6 +16,12 @@ from extra_data import RunDirectory
 # import and hide variable, such that it does not alter namespace.
 from ..constants import mnemonics as _mnemonics_bp

+__all__ = [
+    'extractBunchPattern',
+    'pulsePatternInfo',
+    'repRate'
+]
+
 def extractBunchPattern(bp_table=None, key='sase3', runDir=None):
     ''' generate the bunch pattern and number of pulses of a source directly from the
......
src/toolbox_scs/misc/bunch_pattern_external.py:

@@ -11,6 +11,13 @@ import logging
 import euxfel_bunch_pattern as ebp

+__all__ = [
+    'is_sase_3',
+    'is_sase_1',
+    'is_ppl',
+    'is_pulse_at',
+]
+
 PPL_SCS = ebp.LASER_SEED6
 log = logging.getLogger(__name__)
......
src/toolbox_scs/misc/laser_utils.py:

+__all__ = [
+    'degToRelPower',
+    'positionToDelay',
+]
+
+
 def positionToDelay(pos, origin=0, invert = False, reflections=1):
     ''' converts a motor position in mm into optical delay in picosecond
     Inputs:
......
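The `positionToDelay` docstring is truncated here, but the conversion is standard delay-stage geometry: if it follows the usual `2 * reflections * (pos - origin) / c` form, each millimetre of travel with one reflection adds about 6.67 ps of delay. A hypothetical call, assuming mm in and ps out as the docstring states:

.. code:: python

    # With one reflection, 1 mm of travel doubles to 2 mm of optical path,
    # i.e. roughly 6.67 ps at the speed of light (assumed conversion).
    delay_ps = positionToDelay(1.0, origin=0, invert=False, reflections=1)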