Compare revisions

Projects: SCS/ToolBox, kluyvert/ToolBox
Commits on Source (7)
Showing with 311 additions and 216 deletions
@@ -15,10 +15,10 @@ If the toolbox has been installed in your home directory previously, everything
.. code:: bash
pip install --user .
pip install --user ".[maxwell]"
Alternatively, use the -e flag to install the package in development mode.
.. code:: bash
pip install --user -e .
pip install --user -e ".[maxwell]"
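As a quick post-install check (a minimal sketch; it only assumes the package-level ``__all__`` introduced in this changeset):

.. code:: python

    # Verify the install: the package imports and exposes its public API.
    import toolbox_scs as tb
    print(len(tb.__all__), 'names exported')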
@@ -8,6 +8,13 @@ with open('VERSION') as f:
_version = _version.strip("\n")
basic_analysis_reqs = ['numpy', 'scipy',]  # readily available in Karabo
advanced_analysis_reqs = [
'pandas', 'imageio', 'xarray>=0.13.0', 'h5py', 'h5netcdf',]
interactive_reqs = ['ipykernel', 'matplotlib', 'tqdm',]
maxwell_reqs = ['joblib', 'extra_data', 'euxfel_bunch_pattern>=0.6']
setup(name='toolbox_scs',
version=_version,
description="A collection of code for the SCS beamline",
@@ -20,9 +27,11 @@ setup(name='toolbox_scs',
package_dir={'': 'src'},
packages=find_packages('src'),
package_data={},
install_requires=[
'xarray>=0.13.0', 'numpy', 'matplotlib',
'pandas', 'scipy', 'h5py', 'h5netcdf',
'extra_data', 'euxfel_bunch_pattern>=0.6',
],
)
install_requires=basic_analysis_reqs,
extras_require={
'advanced': advanced_analysis_reqs,
'interactive': interactive_reqs,
'maxwell': advanced_analysis_reqs + interactive_reqs + maxwell_reqs,
'test': ['pytest']
}
)
[removed]
from .load import (load, concatenateRuns, get_array, run_by_path)
from .constants import mnemonics
from .mnemonics_machinery import mnemonics_for_run

__all__ = (
    # functions
    "load",
    "concatenateRuns",
    "get_array",
    "run_by_path",
    "mnemonics_for_run",
    # Classes
    # Variables
    "mnemonics",
)

# ------------------------------------------------------------------------------
# Clean namespace
# clean_ns is a collection of undesired items in the namespace
# ------------------------------------------------------------------------------
clean_ns = [
    # filenames
    'constants',
    'mnemonics_machinery',
    # folders
    'misc',
    'util',
    'detectors',
    'routines',
]
for name in dir():
    if name in clean_ns:
        del globals()[name]
del globals()['clean_ns']
del globals()['name']

[added]
from .constants import *
from .detectors import *
# Module name is the same as a child function, we use alias to avoid conflict
import toolbox_scs.load as load_module
from .load import *
from .misc import *
from .mnemonics_machinery import *
from .routines import *

__all__ = (
    # top-level modules
    constants.__all__
    + load_module.__all__
    + mnemonics_machinery.__all__
    # submodules
    + detectors.__all__
    + misc.__all__
    + routines.__all__
)
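This works because a star import binds only the names a module lists in ``__all__``, while the submodule objects themselves stay reachable for composing the package-level list. A minimal sketch of the pattern with illustrative module names (not the real toolbox layout):

.. code:: python

    # mypkg/_core.py
    __all__ = ['public_func']          # the only name exported by *
    def public_func():
        return 42
    def _private_helper():             # hidden from star imports
        pass

    # mypkg/__init__.py
    from . import _core                # module object, for composing __all__
    from ._core import *               # binds only names in _core.__all__
    __all__ = tuple(_core.__all__)     # package-level export list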
__all__ = [
'mnemonics'
]
mnemonics = {
# Machine
"sase3": ({'source': 'SCS_RR_UTC/MDL/BUNCH_DECODER',
@@ -142,6 +147,17 @@ mnemonics = {
'key': 'interlock.AActionState.value',
'dim': None},),
# XTD10 MCP (after GATT)
'XTD10_MCP3raw': ({'source': 'SA3_XTD10_MCP/ADC/1:channel_3.output',
'key': 'data.rawData',
'dim': ['XTD10_MCPsampleId']},),
'XTD10_MCP5raw': ({'source': 'SA3_XTD10_MCP/ADC/1:channel_5.output',
'key': 'data.rawData',
'dim': ['XTD10_MCPsampleId']},),
'XTD10_MCP9raw': ({'source': 'SA3_XTD10_MCP/ADC/1:channel_9.output',
'key': 'data.rawData',
'dim': ['XTD10_MCPsampleId']},),
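Each mnemonic maps a short name to a Karabo source, key, and dimension labels, which are unpacked when reading data. A usage sketch for one of the new entries (the proposal and run numbers are placeholders):

.. code:: python

    import extra_data
    from toolbox_scs.constants import mnemonics

    run = extra_data.open_run(proposal=2212, run=235)  # placeholder IDs
    entry = mnemonics['XTD10_MCP3raw'][0]
    trace = run.get_array(entry['source'], entry['key'],
                          extra_dims=entry['dim'])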
# DPS imagers
"DPS1CAM2": ({'source': 'SCS_BLU_DPS-1/CAM/IMAGER2CAMERA:daqOutput',
'key': 'data.image.pixels',
......
[removed]
from .xgm import (
    get_xgm, calibrate_xgm)
from .digitizers import (
    get_peaks, get_tim_peaks, get_laser_peaks, get_digitizer_peaks,
    check_peak_params)
from .bam_detectors import get_bam
from .pes import get_pes_tof, get_pes_params
from .dssc_data import (
    save_xarray, load_xarray, get_data_formatted, save_attributes_h5)
from .dssc_misc import (
    load_dssc_info, create_dssc_bins, quickmask_DSSC_ASIC,
    get_xgm_formatted, load_mask)
from .dssc_processing import (
    process_dssc_data)
from .dssc import (
    DSSCBinner, DSSCFormatter)
from .azimuthal_integrator import (
    AzimuthalIntegrator, AzimuthalIntegratorDSSC)

__all__ = (
    # Functions
    "get_xgm",
    "calibrate_xgm",
    "get_peaks",
    "get_tim_peaks",
    "get_laser_peaks",
    "get_digitizer_peaks",
    "check_peak_params",
    "get_bam",
    "get_pes_tof",
    "get_pes_params",
    "save_xarray",
    "load_xarray",
    "get_data_formatted",
    "save_attributes_h5",
    "load_dssc_info",
    "create_dssc_bins",
    "quickmask_DSSC_ASIC",
    "get_xgm_formatted",
    "load_mask",
    "calc_xgm_frame_indices",
    "process_dssc_data",
    # Classes
    "DSSCBinner",
    "DSSCFormatter",
    "AzimuthalIntegrator",
    "AzimuthalIntegratorDSSC",
    # Variables
)

# -----------------------------------------------------------------------------
# Clean namespace
# -> certain filenames we don't need in the namespace. Especially not those
#    that are marked as private by using an underscore (_<filename>.py).
# -----------------------------------------------------------------------------
clean_ns = [
    # filenames
    'DSSC_bkp',
    'DSSC1module',
    'dssc',
    'dssc_routines',
    'dssc_processing',
    'dssc_data',
    'dssc_misc',
    'dssc_plot',
    'azimuthal_integrator',
    'FastCCD',
    'xgm',
    'digitizers',
    'bam_detectors',
    'pes'
]
for name in dir():
    if name in clean_ns:
        del globals()[name]
del globals()['clean_ns']
del globals()['name']

[added]
from .azimuthal_integrator import *
from .bam_detectors import *
from .digitizers import *
from .dssc import *
from .dssc_data import *
from .dssc_misc import *
from .dssc_processing import *
from .pes import *
from .xgm import *

__all__ = (
    azimuthal_integrator.__all__
    + bam_detectors.__all__
    + digitizers.__all__
    + dssc.__all__
    + dssc_data.__all__
    + dssc_misc.__all__
    + dssc_processing.__all__
    + pes.__all__
    + xgm.__all__
)
import logging
import numpy as np
__all__ = [
'AzimuthalIntegrator',
'AzimuthalIntegratorDSSC'
]
log = logging.getLogger(__name__)
......
@@ -15,6 +15,9 @@ from ..misc.bunch_pattern_external import is_pulse_at
from ..mnemonics_machinery import (mnemonics_to_process,
mnemonics_for_run)
__all__ = [
'get_bam',
]
log = logging.getLogger(__name__)
......
@@ -18,6 +18,14 @@ from ..util.exceptions import ToolBoxValueError
from ..mnemonics_machinery import (mnemonics_to_process,
mnemonics_for_run)
__all__ = [
'check_peak_params',
'get_digitizer_peaks',
'get_laser_peaks',
'get_peaks',
'get_tim_peaks',
]
log = logging.getLogger(__name__)
@@ -271,6 +279,7 @@ def get_peaks(run,
# 2. Use raw data from digitizer
# minimum pulse period @ 4.5MHz, according to digitizer type
digitizer = digitizer_type(source=source)
min_distance = 1
if digitizer == 'FastADC':
min_distance = 24
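The 24-sample floor corresponds to the 4.5 MHz maximum pulse rate at the Fast ADC's sampling rate; a back-of-envelope check (the sampling rates here are assumptions, not stated in this diff — the ADQ412 value used later in the code, 440, sits just under the nominal figure):

.. code:: python

    # Samples between consecutive pulses at 4.5 MHz, assuming nominal
    # sampling rates of ~108 MS/s (Fast ADC) and ~2 GS/s (ADQ412).
    for name, fs in [('FastADC', 108e6), ('ADQ412', 2e9)]:
        print(name, round(fs / 4.5e6))   # FastADC 24, ADQ412 444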
@@ -348,8 +357,7 @@
return peaks.assign_coords({extra_dim: pid})
def channel_peak_params(run, source, key=None, digitizer=None,
channel=None, board=None):
def channel_peak_params(run, source, key=None, channel=None, board=None):
"""
Extract peak-integration parameters used by a channel of the digitizer.
@@ -364,13 +372,12 @@ def channel_peak_params(run, source, key=None, digitizer=None,
key: str
optional, used in combination with source (if source is not a ToolBox
mnemonic) instead of digitizer, channel and board.
digitizer: {"FastADC", "ADQ412"} str
Type of digitizer. If None, inferred from the source mnemonic.
channel: int or str
The digitizer channel for which to retrieve the parameters. If None,
inferred from the source mnemonic.
inferred from the source mnemonic or source + key arguments.
board: int
Board of the ADQ412. If None, inferred from the source mnemonic.
Board of the ADQ412. If None, inferred from the source mnemonic or
source + key arguments.
Returns
-------
@@ -381,58 +388,116 @@
m = run_mnemonics[source]
source = m['source']
key = m['key']
[removed]
    if key is not None:
        if 'network' in source:
            digitizer = 'ADQ412'
            ch_to_int = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
            k = key.split('.')[1].split('_')
            channel = ch_to_int[k[2]]
            board = k[1]
        else:
            digitizer = 'FastADC'
            channel = int(source.split(':')[1].split('.')[0].split('_')[1])
    if digitizer is None:
        raise ValueError('digitizer argument is missing.')

[added]
    if 'network' in source:
        return adq412_channel_peak_params(run, source, key, channel, board)
    else:
        return fastADC_channel_peak_params(run, source, channel)
def fastADC_channel_peak_params(run, source, channel=None):
"""
Extract peak-integration parameters used by a channel of the Fast ADC.
Parameters
----------
run: extra_data.DataCollection
DataCollection containing the digitizer data.
source: str
Name of Fast ADC source, e.g. 'SCS_UTC1_MCP/ADC/1:channel_5.output'.
channel: int
The Fast ADC channel for which to retrieve the parameters. If None,
inferred from the source.
Returns
-------
dict with peak integration parameters.
"""
fastADC_keys = {
'baseStart': ('baselineSettings.baseStart.value',
'baseStart.value'),
'baseStop': ('baseStop.value',),
'baseLength': ('baselineSettings.length.value',),
'enable': ('enablePeakComputation.value',),
'pulseStart': ('initialDelay.value',),
'pulseLength': ('peakSamples.value',),
'npulses': ('numPulses.value',),
'period': ('pulsePeriod.value',)
}
[removed from the old channel_peak_params]
    if channel is None:
        raise ValueError('channel argument is missing.')
    if isinstance(channel, str):
        ch_to_int = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
        channel = ch_to_int[channel]
    if board is None and digitizer == 'ADQ412':
        raise ValueError('board argument is missing.')
    keys = None
    if digitizer == 'ADQ412':
        baseKey = f'board{board}.apd.channel_{channel}.'
        keys = ['baseStart', 'baseStop', 'pulseStart',
                'pulseStop', 'initialDelay', 'upperLimit',
                'enable']
        keys = {k: baseKey + k + '.value' for k in keys}
        keys['npulses'] = f'board{board}.apd.numberOfPulses.value'
    if digitizer == 'FastADC':
        if ":" in source:
            baseKey = source.split(':')[1].split('.')[0] + '.'
        else:
            baseKey = f'channel_{channel}.'
        keys = ['baseStart', 'baseStop', 'initialDelay',
                'peakSamples', 'numPulses', 'pulsePeriod',
                'enablePeakComputation']
        keys = {k: baseKey + k + '.value' for k in keys}
    if ":" in source:
        source = source.split(':')[0]
    tid, data = run.select(source).train_from_index(0)
    result = [data[source][k] for k in keys.values()]
    result = dict(zip(keys.keys(), result))
    if digitizer == 'ADQ412':
        result['period'] = result.pop('upperLimit') - \
                           result.pop('initialDelay')
    if digitizer == 'FastADC':
        result['period'] = result.pop('pulsePeriod')
        result['npulses'] = result.pop('numPulses')
        result['pulseStart'] = result['initialDelay']
        result['pulseStop'] = result.pop('initialDelay') + \
                              result.pop('peakSamples')
        result['enable'] = result.pop('enablePeakComputation')

[added: body of fastADC_channel_peak_params]
    if channel is None:
        channel = int(source.split(':')[1].split('.')[0].split('_')[1])
    baseName = f'channel_{channel}.'
    source = source.split(':')[0]
    data = run.select(source).train_from_index(0)[1][source]
    result = {}
    for mnemo, versions in fastADC_keys.items():
        for v in versions:
            key = baseName + v
            if key in data:
                result[mnemo] = data[key]
    if 'baseLength' in result:
        result['baseStop'] = result['baseStart'] + \
                             result.pop('baseLength')
    if 'pulseLength' in result:
        result['pulseStop'] = result['pulseStart'] + \
                              result.pop('pulseLength')
    return result
def adq412_channel_peak_params(run, source, key=None,
channel=None, board=None):
"""
Extract peak-integration parameters used by a channel of the ADQ412.
Parameters
----------
run: extra_data.DataCollection
DataCollection containing the digitizer data.
source: str
Name of ADQ412 source, e.g. 'SCS_UTC1_ADQ/ADC/1:network'.
key: str
optional, e.g. 'digitizers.channel_1_D.apd.pulseIntegral', used in
combination with source instead of channel and board.
channel: int or str
The ADQ412 channel for which to retrieve the parameters. If None,
inferred from the source mnemonic or source + key arguments.
board: int
Board of the ADQ412. If None, inferred from the source mnemonic or
source + key arguments.
Returns
-------
dict with peak integration parameters.
"""
if key is None:
if channel is None or board is None:
raise ValueError('key or channel + board arguments are '
'missing.')
else:
k = key.split('.')[1].split('_')
ch_to_int = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
channel = ch_to_int[k[2]]
board = k[1]
baseName = f'board{board}.apd.channel_{channel}.'
source = source.split(':')[0]
adq412_keys = {
'baseStart': (baseName + 'baseStart.value',),
'baseStop': (baseName + 'baseStop.value',),
'enable': (baseName + 'enable.value',),
'pulseStart': (baseName + 'pulseStart.value',),
'pulseStop': (baseName + 'pulseStop.value',),
'initialDelay': (baseName + 'initialDelay.value',),
'upperLimit': (baseName + 'upperLimit.value',),
'npulses': (f'board{board}.apd.numberOfPulses.value',)
}
data = run.select(source).train_from_index(0)[1][source]
result = {}
for mnemo, versions in adq412_keys.items():
for key in versions:
if key in data:
result[mnemo] = data[key]
result['period'] = result.pop('upperLimit') - \
result.pop('initialDelay')
return result
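A usage sketch for these helpers, reusing the example sources from the docstrings (proposal/run IDs are placeholders):

.. code:: python

    import extra_data
    run = extra_data.open_run(proposal=2212, run=235)   # placeholder IDs

    # ADQ412: channel and board resolved from source + key
    adq_params = channel_peak_params(
        run, 'SCS_UTC1_ADQ/ADC/1:network',
        key='digitizers.channel_1_D.apd.pulseIntegral')

    # Fast ADC: channel inferred from the source name
    fadc_params = channel_peak_params(
        run, 'SCS_UTC1_MCP/ADC/1:channel_5.output')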
@@ -523,7 +588,8 @@ def get_peak_params(run, mnemonic, raw_trace=None, ntrains=200):
title = 'Digitizer peak params'
else:
mnemo_raw = mnemonic
min_distance = 24 if "FastADC" in mnemonic else 440
digitizer = digitizer_type(mnemonic, run_mnemonics)
min_distance = 24 if digitizer == "FastADC" else 440
title = 'Auto-find peak params'
if raw_trace is None:
sel = run.select_trains(np.s_[:ntrains])
@@ -589,7 +655,8 @@ def check_peak_params(run, mnemonic, raw_trace=None, ntrains=200, params=None,
log.warning('The digitizer did not record peak-integrated data.')
if not plot:
return params
min_distance = 24 if "FastADC" in mnemonic else 440
digitizer = digitizer_type(mnemonic, run_mnemonics)
min_distance = 24 if digitizer == "FastADC" else 440
if 'bunchPatternTable' in run_mnemonics and bunchPattern != 'None':
sel = run.select_trains(np.s_[:ntrains])
bp_params = {}
@@ -689,6 +756,24 @@ def plotPeakIntegrationWindow(raw_trace, params, bp_params, show_all=False):
return fig, ax
def digitizer_type(mnemonic=None, mnemo_dict=None, source=None):
    if mnemonic is not None:
        source = mnemo_dict[mnemonic]['source']
    if ':channel' in source:
        return 'FastADC'
    if ':network' in source:
        return 'ADQ412'
    dic = {'XTD10_MCP': 'FastADC',
           'FastADC': 'FastADC',
           'PES': 'ADQ412',
           'MCP': 'ADQ412'}
    if mnemonic is not None:  # name-based fallback only works on mnemonics
        for k, v in dic.items():
            if k in mnemonic:
                return v
    log.warning(f'Could not find digitizer type from mnemonic {mnemonic}.')
    return 'ADQ412'
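For example, given a ``run_mnemonics`` dict from ``mnemonics_for_run(run)`` (a sketch using names from this changeset):

.. code:: python

    digitizer_type('XTD10_MCP3raw', mnemo_dict=run_mnemonics)  # 'FastADC'
    digitizer_type(source='SCS_UTC1_ADQ/ADC/1:network')        # 'ADQ412'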
def get_tim_peaks(run, mnemonics=None, merge_with=None,
bunchPattern='sase3', integParams=None,
keepAllSase=False):
@@ -837,6 +922,9 @@ def get_digitizer_peaks(run, mnemonics, digitizer, merge_with=None,
elif 'bunchPatternTable' in run_mnemonics:
bpt = run.get_array(*run_mnemonics['bunchPatternTable'].values())
log.debug('Loaded bpt from DataCollection.')
elif 'bunchPatternTable_SA3' in run_mnemonics:
bpt = run.get_array(*run_mnemonics['bunchPatternTable_SA3'].values())
log.debug('Loaded bpt from DataCollection.')
else:
bpt = None
......
@@ -29,7 +29,10 @@ from .dssc_misc import (
from .dssc_processing import (
process_dssc_data, create_empty_dataset)
__all__ = ["DSSCBinner", "DSSCFormatter"]
__all__ = [
"DSSCBinner",
"DSSCFormatter"]
log = logging.getLogger(__name__)
......
@@ -6,6 +6,13 @@ import xarray as xr
from ..util.exceptions import ToolBoxFileError
__all__ = [
'get_data_formatted',
'load_xarray',
'save_attributes_h5',
'save_xarray',
]
log = logging.getLogger(__name__)
......
@@ -14,6 +14,14 @@ import extra_data as ed
from .xgm import get_xgm
from .digitizers import get_tim_peaks
__all__ = [
'create_dssc_bins',
'get_xgm_formatted',
'load_dssc_info',
'load_mask',
'quickmask_DSSC_ASIC',
]
log = logging.getLogger(__name__)
......
@@ -15,6 +15,10 @@ import extra_data as ed
from ..mnemonics_machinery import mnemonics_for_run
from .dssc_data import save_xarray
__all__ = [
'process_dssc_data'
]
log = logging.getLogger(__name__)
......
from joblib import Parallel, delayed, parallel_backend
from time import strftime
import tempfile
import shutil
from tqdm.auto import tqdm
import os
@@ -17,11 +16,11 @@ import h5py
from glob import glob
from imageio import imread
import ToolBox as tb
from ..constants import mnemonics as _mnemonics
from .azimuthal_integrator import AzimuthalIntegrator
from ..misc.laser_utils import positionToDelay
class FastCCD:
def __init__(self, proposal, distance=1, raw=False):
......
@@ -16,6 +16,12 @@ from ..mnemonics_machinery import (mnemonics_to_process,
mnemonics_for_run)
__all__ = [
'get_pes_params',
'get_pes_tof',
]
log = logging.getLogger(__name__)
......
@@ -16,6 +16,11 @@ from ..misc.bunch_pattern_external import is_sase_1, is_sase_3
from ..mnemonics_machinery import (mnemonics_to_process,
mnemonics_for_run)
__all__ = [
'calibrate_xgm',
'get_xgm',
]
log = logging.getLogger(__name__)
......
@@ -21,6 +21,13 @@ from .mnemonics_machinery import mnemonics_for_run
from .util.exceptions import ToolBoxValueError
import toolbox_scs.detectors as tbdet
__all__ = [
'concatenateRuns',
'get_array',
'load',
'run_by_path',
]
log = logging.getLogger(__name__)
@@ -123,20 +130,13 @@ def load(proposalNB=None, runNB=None,
run.info()
data_arrays = []
run_mnemonics = mnemonics_for_run(run)
[removed]
    # load pulse pattern info
    if 'bunchPatternTable' in run_mnemonics:
        bpt = run.get_array(*run_mnemonics['bunchPatternTable'].values(),
                            name='bunchPatternTable')
        data_arrays.append(bpt)
    elif 'bunchPatternTable_SA3' in run_mnemonics:
        log.info('Did not find SCS bunch pattern table but found the SA3 one.')
        bpt = run.get_array(*run_mnemonics['bunchPatternTable_SA3'].values(),
                            name='bunchPatternTable')
        data_arrays.append(bpt)
    else:
        bpt = None

[added]
    bpt = load_bpt(run, run_mnemonics=run_mnemonics)
    if bpt is None:
        log.warning('Bunch pattern table not found in run. Skipping!')
    else:
        data_arrays.append(bpt)
for f in fields:
if type(f) == dict:
@@ -322,3 +322,20 @@ def get_array(run, mnemonic_key=None, stepsize=None):
raise
return data
def load_bpt(run, merge_with=None, run_mnemonics=None):
if run_mnemonics is None:
run_mnemonics = mnemonics_for_run(run)
for key in ['bunchPatternTable', 'bunchPatternTable_SA3']:
if bool(merge_with) and key in merge_with:
log.debug(f'Using {key} from merge_with dataset.')
return merge_with[key]
if key in run_mnemonics:
bpt = run.get_array(*run_mnemonics[key].values(),
name='bunchPatternTable')
log.debug(f'Loaded {key} from DataCollection.')
return bpt
log.debug('Could not find bunch pattern table.')
return None
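A usage sketch: the helper prefers a bunch pattern table already present in a merged dataset and only then falls back to the run itself (``ds`` stands for any existing xarray Dataset):

.. code:: python

    bpt = load_bpt(run)                  # read from the DataCollection
    bpt = load_bpt(run, merge_with=ds)   # reuse ds['bunchPatternTable']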
[removed]
from .bunch_pattern import (extractBunchPattern, pulsePatternInfo,
                            repRate)
from .bunch_pattern_external import (is_sase_3, is_sase_1,
                                     is_ppl, is_pulse_at)
from .laser_utils import positionToDelay, degToRelPower

__all__ = (
    # Functions
    "extractBunchPattern",
    "pulsePatternInfo",
    "repRate",
    "sortBAMdata",
    "is_sase_3",
    "is_sase_1",
    "is_ppl",
    "is_pulse_at",
    "get_index_ppl",
    "get_index_sase1",
    "get_index_sase3",
    "positionToDelay",
    "degToRelPower",
    # Classes
    # Variables
)

[added]
from .bunch_pattern import *
from .bunch_pattern_external import *
from .laser_utils import *

__all__ = (
    bunch_pattern.__all__
    + bunch_pattern_external.__all__
    + laser_utils.__all__
)
@@ -16,6 +16,12 @@ from extra_data import RunDirectory
# import and hide variable, so that it does not alter the namespace.
from ..constants import mnemonics as _mnemonics_bp
__all__ = [
'extractBunchPattern',
'pulsePatternInfo',
'repRate'
]
def extractBunchPattern(bp_table=None, key='sase3', runDir=None):
''' generate the bunch pattern and number of pulses of a source directly from the
......
@@ -11,6 +11,13 @@ import logging
import euxfel_bunch_pattern as ebp
__all__ = [
'is_sase_3',
'is_sase_1',
'is_ppl',
'is_pulse_at',
]
PPL_SCS = ebp.LASER_SEED6
log = logging.getLogger(__name__)
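These predicates are presumably thin wrappers around euxfel_bunch_pattern's bit tests on the 2700-slot bunch pattern table. A rough sketch of the underlying library calls (assuming ebp's ``is_sase``/``is_laser`` helpers; not code from this diff):

.. code:: python

    import numpy as np
    import euxfel_bunch_pattern as ebp

    bpt = np.zeros(2700, dtype=np.uint32)       # one train, all slots empty
    sase3 = ebp.is_sase(bpt, 3)                 # mask of SASE3 pulses
    ppl = ebp.is_laser(bpt, ebp.LASER_SEED6)    # SCS pump-probe laser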
......
__all__ = [
'degToRelPower',
'positionToDelay',
]
def positionToDelay(pos, origin=0, invert=False, reflections=1):
''' converts a motor position in mm into optical delay in picoseconds
Inputs:
......
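A worked example of the position-to-delay conversion (a sketch assuming the usual double-pass delay-line geometry, where the optical path changes by twice the stage travel per reflection; not necessarily the exact implementation):

.. code:: python

    C_MM_PER_PS = 0.299792458                # speed of light in mm/ps

    def position_to_delay(pos, origin=0, invert=False, reflections=1):
        """mm of stage travel -> optical delay in ps."""
        sign = -1 if invert else 1
        return sign * 2 * reflections * (pos - origin) / C_MM_PER_PS

    print(position_to_delay(25.0, origin=10.0))   # ~100.07 ps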