%% Cell type:markdown id: tags:
# Jungfrau Offline Correction #
Author: European XFEL Detector Group, Version: 2.0
Offline Calibration for the Jungfrau Detector
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/SPB/202130/p900204/raw"  # the folder to read data from, required
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/remove"  # the folder to output to, required
run = 91  # run to process, required
metadata_folder = ""  # directory containing calibration_metadata.yml when run by xfel-calibrate
sequences = [-1]  # sequences to correct, set to [-1] for all, range allowed
sequences_per_node = 1  # number of sequence files per cluster node if run as a Slurm job; set to 0 to not run parallel with Slurm

# Parameters used to access raw data.
karabo_id = "SPB_IRDA_JF4M"  # karabo prefix of Jungfrau devices
karabo_da = ['JNGFR01', 'JNGFR02', 'JNGFR03', 'JNGFR04', 'JNGFR05', 'JNGFR06', 'JNGFR07', 'JNGFR08']  # data aggregators
receiver_template = "JNGFR{:02d}"  # detector receiver template for accessing raw data files, e.g. "JNGFR{:02d}"
instrument_source_template = '{}/DET/{}:daqOutput'  # template for source name (filled with karabo_id & receiver_id), e.g. 'SPB_IRDA_JF4M/DET/JNGFR01:daqOutput'
ctrl_source_template = '{}/DET/CONTROL'  # template for control source name (filled with karabo_id_control)
karabo_id_control = ""  # if control is on a different ID; set to empty string if it is the same as karabo_id

# Parameters for the calibration database.
cal_db_interface = "tcp://max-exfl016:8017#8025"  # the database interface to use
cal_db_timeout = 180000  # timeout on calibration database requests
creation_time = ""  # to overwrite the measured creation time. Required format: "YYYY-MM-DD HH:MM:SS", e.g. "2022-06-28 13:00:00"

# Parameters affecting corrected data.
relative_gain = True  # do relative gain correction
strixel_sensor = False  # reordering for strixel detector layout
strixel_double_norm = 2.0  # normalization to use for double-size pixels, only applied for strixel sensors
limit_trains = 0  # ONLY FOR TESTING: process only the first N trains; use 0 to process all
chunks_ids = 32  # HDF chunk size for memoryCell and frameNumber
chunks_data = 1  # HDF chunk size for pixel data, in number of frames

# Parameters for retrieving calibration constants.
manual_slow_data = False  # if True, use the manually entered bias_voltage, integration_time, gain_setting, and gain_mode values
integration_time = 4.96  # integration time in us, will be overwritten by the value in file
gain_setting = 0  # 0 for dynamic gain, 1 for dynamic HG0; will be overwritten by the value in file
gain_mode = 0  # 0 for runs with dynamic gain setting, 1 for fixed gain; will be overwritten by the value in file unless manual_slow_data is True
mem_cells = -1  # set mem_cells to -1 to automatically use the value stored in RAW data
bias_voltage = 180  # will be overwritten by the value in file

# Parameters for plotting
skip_plots = False  # exit after writing corrected files
plot_trains = 500  # number of trains to plot for RAW and CORRECTED plots; set to -1 to automatically plot all trains
cell_id_preview = 15  # cell ID used for preview in single-shot plots

# Parameters for ROI selection and reduction
roi_definitions = [-1]  # list with groups of 6 values defining ROIs, e.g. [3, 120, 180, 200, 550, -2] for module 3 (JNGFR03), slice 120:180, 200:550, average along axis -2 (slow scan; -1 for fast scan)


def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):
    from xfel_calibrate.calibrate import balance_sequences as bs
    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)
```
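%% Cell type:markdown id: tags:
The flat `roi_definitions` list is consumed in groups of six values: `(module, slow_start, slow_stop, fast_start, fast_stop, mean_axis)`. As a minimal illustration (the `iter_roi_groups` helper below is hypothetical, not part of this notebook), the grouping and the `[5::6]` axis check used further down can be expressed as:
%% Cell type:code id: tags:
``` python
def iter_roi_groups(definitions):
    """Yield (module, a1, a2, b1, b2, mean_axis) tuples from the flat list."""
    if definitions == [-1]:  # sentinel meaning "no ROIs defined"
        return
    for i in range(0, len(definitions), 6):
        yield tuple(definitions[i:i + 6])

# Example: one ROI on module 3 (JNGFR03), rows 120:180, columns 200:550,
# averaged along axis -2 (slow scan).
example_rois = [3, 120, 180, 200, 550, -2]
for module, a1, a2, b1, b2, mean_axis in iter_roi_groups(example_rois):
    print(module, (a1, a2), (b1, b2), mean_axis)
# The validation cell below inspects every sixth entry: example_rois[5::6] == [-2].
```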
%% Cell type:code id: tags:
``` python
import fnmatch
import multiprocessing
import sys
import warnings
from logging import warning
from pathlib import Path

import h5py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pasha as psh
import tabulate
from IPython.display import Latex, Markdown, display
from extra_data import DataCollection, H5File, RunDirectory, by_id, components
from matplotlib.colors import LogNorm

import cal_tools.restful_config as rest_cfg
from cal_tools.calcat_interface import JUNGFRAU_CalibrationData
from cal_tools.enums import BadPixels
from cal_tools.files import DataFile
from cal_tools.jungfraulib import JungfrauCtrl
from cal_tools.plotting import init_jungfrau_geom
from cal_tools.step_timing import StepTimer
from cal_tools.tools import (
    calcat_creation_time,
    map_seq_files,
    CalibrationMetadata,
)

warnings.filterwarnings('ignore')

matplotlib.use('agg')
%matplotlib inline
```
%% Cell type:code id: tags:
``` python
in_folder = Path(in_folder)
out_folder = Path(out_folder)
run_folder = in_folder / f'r{run:04d}'
run_dc = RunDirectory(run_folder)

instrument_src = instrument_source_template.format(karabo_id, receiver_template)

metadata = CalibrationMetadata(metadata_folder or out_folder)
# NOTE: this notebook will not overwrite the calibration metadata file.
const_yaml = metadata.get("retrieved-constants", {})

out_folder.mkdir(parents=True, exist_ok=True)
print(f"Run is: {run}")
print(f"Instrument H5File source: {instrument_src}")
karabo_da = sorted(karabo_da)
print(f"Process modules: {karabo_da}")

# Run's creation time:
creation_time = calcat_creation_time(in_folder, run, creation_time)
print(f"Creation time: {creation_time}")

if karabo_id_control == "":
    karabo_id_control = karabo_id

if any(axis_no not in {-2, -1, 2, 3} for axis_no in roi_definitions[5::6]):
    print("ROI averaging must be on axis 2/3 (or equivalently -2/-1). "
          f"Axis numbers given: {roi_definitions[5::6]}")
    sys.exit(1)
```
%% Cell type:code id: tags:
``` python
ctrl_src = ctrl_source_template.format(karabo_id_control)
ctrl_data = JungfrauCtrl(run_dc, ctrl_src)

if mem_cells < 0:
    memory_cells, sc_start = ctrl_data.get_memory_cells()
    mem_cells_name = "single cell" if memory_cells == 1 else "burst"
    print(f"Run is in {mem_cells_name} mode.\nStorage cell start: {sc_start:02d}")
else:
    memory_cells = mem_cells
    mem_cells_name = "single cell" if memory_cells == 1 else "burst"
    print(f"Run is manually set to {mem_cells_name} mode, with {memory_cells} memory cells.")

if not manual_slow_data:
    integration_time = ctrl_data.get_integration_time()
    bias_voltage = ctrl_data.get_bias_voltage()
    gain_setting = ctrl_data.get_gain_setting()
    gain_mode = ctrl_data.get_gain_mode()

print(f"Integration time is {integration_time} us")
print(f"Gain setting is {gain_setting} (run settings: {ctrl_data.run_settings})")
print(f"Gain mode is {gain_mode} ({ctrl_data.run_mode})")
print(f"Bias voltage is {bias_voltage} V")
print(f"Number of memory cells is {memory_cells}")
```
%% Cell type:markdown id: tags:
### Retrieving calibration constants
%% Cell type:code id: tags:
``` python
jf_cal = JUNGFRAU_CalibrationData(
    detector_name=karabo_id,
    sensor_bias_voltage=bias_voltage,
    event_at=creation_time,
    modules=karabo_da,
    memory_cells=memory_cells,
    integration_time=integration_time,
    gain_setting=gain_setting,
    gain_mode=gain_mode,
    client=rest_cfg.calibration_client(),
)

da_to_pdu = {}
for mod_info in jf_cal.physical_detector_units.values():
    da_to_pdu[mod_info["karabo_da"]] = mod_info["physical_name"]

if const_yaml:
    const_data = dict()
    for mod, constants in const_yaml.items():
        if mod not in karabo_da:
            continue  # skip other keys like time-summary
        const_data[mod] = dict()
        for cname, mdata in constants["constants"].items():
            const_data[mod][cname] = dict()
            if mdata["creation-time"]:
                with h5py.File(mdata["path"], "r") as cf:
                    const_data[mod][cname] = np.copy(
                        cf[f"{mdata['dataset']}/data"])
else:
    constant_names = ["Offset10Hz", "BadPixelsDark10Hz"]
    if relative_gain:
        constant_names += ["BadPixelsFF10Hz", "RelativeGain10Hz"]
    const_data = jf_cal.ndarray_map(calibrations=constant_names)
```
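%% Cell type:markdown id: tags:
Both branches above yield the same nested mapping: `const_data[karabo_da][constant_name]` holds a NumPy array. A minimal sketch of the expected layout; the shapes shown are an assumption for a single-cell JUNGFRAU module (512 × 1024 pixels, 3 gain stages), not values read from the database:
%% Cell type:code id: tags:
``` python
# Illustrative only: real arrays come from the const_yaml files or jf_cal.ndarray_map().
example_const_data = {
    "JNGFR01": {
        "Offset10Hz": np.zeros((512, 1024, 1, 3), dtype=np.float32),
        "BadPixelsDark10Hz": np.zeros((512, 1024, 1, 3), dtype=np.uint32),
        # "RelativeGain10Hz" and "BadPixelsFF10Hz" appear only when
        # relative_gain is enabled and the constants exist.
    },
}
for mod, cals in example_const_data.items():
    for cname, arr in cals.items():
        print(mod, cname, arr.shape, arr.dtype)
```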
%% Cell type:code id: tags:
``` python
# Validate the constants' availability and raise/warn accordingly.
for mod in karabo_da[:]:
    calibrations = const_data.get(mod, {})
    missing_dark_constants = {"Offset10Hz", "BadPixelsDark10Hz"} - set(calibrations)
    missing_gain_constants = {"BadPixelsFF10Hz", "RelativeGain10Hz"} - set(calibrations)
    if missing_dark_constants:
        warning(
            f"Dark constants {missing_dark_constants} are not available to correct {mod}."
            f" Module {mod} won't be corrected.")
        karabo_da.remove(mod)
    if relative_gain and missing_gain_constants:
        warning(f"Gain constants {missing_gain_constants} were not retrieved for {mod}."
                " No relative gain correction for this module.")

if not karabo_da:  # Dark constants are missing for all modules.
    raise ValueError("Dark constants are missing for all modules.")
```
%% Cell type:code id: tags:
``` python
def prepare_constants(module: str):
    """Prepare constant arrays.

    :param module: The module name (karabo_da)
    :return:
        offset_map (offset map),
        mask (mask of bad pixels),
        gain_map (map of relative gain factors),
        module (name of module),
    """
    constant_arrays = const_data[module]
    offset_map = constant_arrays["Offset10Hz"]
    mask = constant_arrays["BadPixelsDark10Hz"]
    gain_map = constant_arrays.get("RelativeGain10Hz")
    mask_ff = constant_arrays.get("BadPixelsFF10Hz")

    # Combine masks
    if mask_ff is not None:
        mask |= np.moveaxis(mask_ff, 0, 1)

    if memory_cells > 1:
        # Move from (x, y, cell, gain) to (cell, x, y, gain).
        offset_map = np.moveaxis(offset_map, [0, 1], [1, 2])
        mask = np.moveaxis(mask, [0, 1], [1, 2])
    else:
        offset_map = np.squeeze(offset_map)
        mask = np.squeeze(mask)

    # Mask double-size pixels.
    mask[..., [255, 256], :, :] |= BadPixels.NON_STANDARD_SIZE
    mask[..., [255, 256, 511, 512, 767, 768], :] |= BadPixels.NON_STANDARD_SIZE

    if gain_map is not None:
        if memory_cells > 1:
            gain_map = np.moveaxis(gain_map, [0, 2], [2, 0])
            # Add an extra empty-cell constant.
            b = np.ones(((1,) + gain_map.shape[1:]))
            gain_map = np.concatenate((gain_map, b), axis=0)
        else:
            gain_map = np.moveaxis(np.squeeze(gain_map), 1, 0)

    return offset_map, mask, gain_map, module


with multiprocessing.Pool() as pool:
    r = pool.map(prepare_constants, karabo_da)

# Store the prepared constants per module.
constants = {}
for offset_map, mask, gain_map, k_da in r:
    constants[k_da] = (offset_map, mask, gain_map)

const_data.clear()
```
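%% Cell type:markdown id: tags:
In burst mode, `prepare_constants()` reorders each constant from the stored `(x, y, cell, gain)` layout to `(cell, x, y, gain)`, so that indexing by memory cell becomes a plain first-axis lookup. A minimal sketch of that axis move (the shape is illustrative for one 512 × 1024 module with 16 cells and 3 gain stages):
%% Cell type:code id: tags:
``` python
# Stored layout (x, y, cell, gain) -> runtime layout (cell, x, y, gain).
demo_const = np.zeros((512, 1024, 16, 3))
demo_reordered = np.moveaxis(demo_const, [0, 1], [1, 2])
print(demo_const.shape, "->", demo_reordered.shape)  # -> (16, 512, 1024, 3)
```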
%% Cell type:code id: tags:
``` python
# Read available sequence files to correct.
mapped_files, num_seq_files = map_seq_files(
    run_folder, karabo_da, sequences)

if not len(mapped_files):
    raise IndexError(
        "No sequence files available to correct for the selected sequences and karabo_da.")
```
%% Cell type:code id: tags:
``` python
print(f"Processing a total of {num_seq_files} sequence files")

table = []
fi = 0
for kda, sfiles in mapped_files.items():
    for k, f in enumerate(sfiles):
        if k == 0:
            table.append((fi, kda, k, f))
        else:
            table.append((fi, "", k, f))
        fi += 1
md = display(Latex(tabulate.tabulate(
    table, tablefmt='latex',
    headers=["#", "module", "# module", "file"])))
```
%% Cell type:code id: tags:
``` python
if strixel_sensor:
    from cal_tools.jfstrixel import STRIXEL_SHAPE as strixel_frame_shape, double_pixel_indices, to_strixel
    Ydouble, Xdouble = double_pixel_indices()
    print('Strixel sensor transformation enabled')
```
%% Cell type:code id: tags:
``` python
# Correct a chunk of images for offset and gain
def correct_train(wid, index, d):
    d = d.astype(np.float32)  # [cells, x, y]
    g = gain[index]

    # Copy the gain over first, to keep it at the original 3 for low gain.
    if strixel_sensor:
        to_strixel(g, out=gain_corr[index, ...])
    else:
        gain_corr[index, ...] = g

    # Jungfrau gains 0[00], 1[01], 3[11].
    # Change low gain to 2 for indexing purposes.
    g[g == 3] = 2

    # Select memory cells.
    if memory_cells > 1:
        # Even though the memory cell pattern can be assumed to be the same
        # across all trains (for one correction run taken with one
        # acquisition), it is preferable not to rely on this, to account
        # for exceptions that can happen.
        m = memcells[index].copy()
        # A cell value of 255 marks missing cell image data (an image of 0 pixels).
        # The corresponding image is corrected with the constants of cell 0,
        # to avoid values of 0. This relies on the modified memory cells not
        # being stored in the corrected data.
        m[m == 255] = 0
        offset_map_cell = offset_map[m, ...]  # [16 + empty cell, x, y]
        mask_cell = mask[m, ...]
    else:
        offset_map_cell = offset_map
        mask_cell = mask

    # Offset correction
    offset = np.choose(g, np.moveaxis(offset_map_cell, -1, 0))
    d -= offset

    # Gain correction
    if relative_gain and gain_map is not None:
        if memory_cells > 1:
            gain_map_cell = gain_map[m, ...]
        else:
            gain_map_cell = gain_map
        cal = np.choose(g, np.moveaxis(gain_map_cell, -1, 0))
        d /= cal

    msk = np.choose(g, np.moveaxis(mask_cell, -1, 0))

    if strixel_sensor:
        to_strixel(d, out=data_corr[index, ...])
        data_corr[index, :, Ydouble, Xdouble] /= strixel_double_norm
        to_strixel(msk, out=mask_corr[index, ...])
    else:
        data_corr[index, ...] = d
        mask_corr[index, ...] = msk
```
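%% Cell type:markdown id: tags:
The core of `correct_train()` is a per-pixel constant lookup: after remapping the low-gain bit value 3 to index 2, `np.choose` selects, for every pixel, the constant plane matching that pixel's gain stage. A minimal sketch on a 2 × 2 toy image:
%% Cell type:code id: tags:
``` python
g_demo = np.array([[0, 1], [3, 0]], dtype=np.uint8)  # raw gain bits per pixel
g_demo[g_demo == 3] = 2                              # remap low gain 3 -> index 2

# One constant plane per gain stage, stacked on the last axis: shape (2, 2, 3).
consts = np.stack([np.full((2, 2), v) for v in (10., 20., 30.)], axis=-1)

# Move the gain axis first, then pick consts[..., g] element-wise.
selected = np.choose(g_demo, np.moveaxis(consts, -1, 0))
print(selected)  # [[10. 20.]
                 #  [30. 10.]]
```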
%% Cell type:code id: tags:
``` python
step_timer = StepTimer()
n_cpus = multiprocessing.cpu_count()
context = psh.context.ProcessContext(num_workers=n_cpus)
print(f"Using {n_cpus} workers for correction.")
```
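%% Cell type:markdown id: tags:
`pasha` distributes `correct_train()` over worker processes: outputs are written into shared arrays created with `context.alloc()`, and `context.map()` calls the kernel once per entry along the first axis of the input. A minimal, self-contained sketch of the same pattern (names prefixed `demo_` are illustrative):
%% Cell type:code id: tags:
``` python
demo_ctx = psh.context.ProcessContext(num_workers=2)
demo_in = np.arange(8, dtype=np.float32).reshape(4, 2)
demo_out = demo_ctx.alloc(shape=demo_in.shape, dtype=np.float32)

def demo_kernel(wid, index, row):
    # wid: worker id, index: position along the mapped axis, row: the data slice.
    demo_out[index] = row * 2

demo_ctx.map(demo_kernel, demo_in)
print(demo_out)
```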
%% Cell type:code id: tags:
``` python
def save_reduced_rois(ofile, data_corr, mask_corr, karabo_da):
    """If ROIs are defined for this karabo_da, reduce them and save them to the output file."""
    rois_defined = 0
    module_no = int(karabo_da[-2:])
    params_source = f'{karabo_id}/ROIPROC/{karabo_da}'
    rois_source = f'{params_source}:output'

    if roi_definitions != [-1]:
        # Create INSTRUMENT and CONTROL sections to later add datasets.
        outp_source = ofile.create_instrument_source(rois_source)
        ctrl_source = ofile.create_control_source(params_source)

        for i in range(len(roi_definitions) // 6):
            roi_module, a1, a2, b1, b2, mean_axis = roi_definitions[i*6 : (i+1)*6]
            if roi_module == module_no:
                rois_defined += 1
                # Apply the mask and average the remaining pixels to 1D.
                roi_data = data_corr[..., a1:a2, b1:b2].mean(
                    axis=mean_axis, where=(mask_corr[..., a1:a2, b1:b2] == 0)
                )
                # Add ROI-reduced datasets.
                outp_source.create_key(f'data.roi{rois_defined}.data', data=roi_data)
                # Add ROI run control datasets.
                ctrl_source.create_run_key(f'roi{rois_defined}.region', np.array([[a1, a2, b1, b2]]))
                ctrl_source.create_run_key(f'roi{rois_defined}.reduce_axis', np.array([mean_axis]))

        if rois_defined:
            # Copy the index for the new source.
            # Create count/first datasets at the INDEX source.
            ofile.copy(f'INDEX/{karabo_id}/DET/{karabo_da}:daqOutput/data',
                       f'INDEX/{rois_source}/data')
            ntrains = ofile['INDEX/trainId'].shape[0]
            ctrl_source.create_index(ntrains)
```
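%% Cell type:markdown id: tags:
The ROI reduction above averages only good pixels by passing `where=` to `mean()`, so entries with a non-zero bad-pixel mask do not contribute. A minimal sketch:
%% Cell type:code id: tags:
``` python
roi_vals = np.array([[1., 2.], [3., 4.]])
roi_mask = np.array([[0, 1], [0, 0]])  # non-zero marks bad pixels
reduced = roi_vals.mean(axis=-1, where=(roi_mask == 0))
print(reduced)  # [1.  3.5]: the masked 2.0 is excluded from the first row
```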
%% Cell type:markdown id: tags:
### Correcting RAW data ###
%% Cell type:code id: tags:
``` python
# Loop over modules
empty_seq = 0
corrected_files = []
for local_karabo_da, mapped_files_module in mapped_files.items():
    instrument_src_kda = instrument_src.format(int(local_karabo_da[-2:]))

    for sequence_file in mapped_files_module:
        # Save corrected data in an output file with the name
        # of the corresponding raw sequence file.
        ofile_name = sequence_file.name.replace("RAW", "CORR")
        out_file = out_folder / ofile_name
        corrected_files.append(ofile_name)

        # Load the sequence file data collection, the data.adc keydata,
        # the data shape (to later create arrays of the same shape),
        # and the number of available trains to correct.
        seq_dc = H5File(sequence_file)
        seq_dc_adc = seq_dc[instrument_src_kda, "data.adc"]
        ishape = seq_dc_adc.shape  # input shape
        corr_ntrains = ishape[0]  # number of available trains to correct
        all_train_ids = seq_dc_adc.train_ids

        # Warn if this sequence has no trains to correct.
        # Otherwise, print the number of trains with no data.
        if corr_ntrains == 0:
            warning(f"No trains to correct for {sequence_file.name}: "
                    "Skipping the processing of this file.")
            empty_seq += 1
            continue
        elif len(all_train_ids) != corr_ntrains:
            print(f"{sequence_file.name} has {len(all_train_ids) - corr_ntrains} "
                  "trains with missing data.")

        # For testing, limit the corrected trains, i.e. get output faster.
        if limit_trains > 0:
            print(f"\nCorrected trains are limited to: {limit_trains} trains")
            corr_ntrains = min(corr_ntrains, limit_trains)
        print(f"\nNumber of corrected trains is {corr_ntrains} for {ofile_name}")

        # Load constants from the constants dictionary.
        # These arrays are used by the `correct_train()` function.
        offset_map, mask, gain_map = constants[local_karabo_da]

        # Determine the total output shape.
        if strixel_sensor:
            oshape = (*ishape[:-2], *strixel_frame_shape)
        else:
            oshape = ishape

        # Allocate shared arrays for corrected data. Used in `correct_train()`.
        data_corr = context.alloc(shape=oshape, dtype=np.float32)
        gain_corr = context.alloc(shape=oshape, dtype=np.uint8)
        mask_corr = context.alloc(shape=oshape, dtype=np.uint32)

        step_timer.start()
        # Overwrite seq_dc after eliminating empty trains and/or applying the train limit.
        seq_dc = seq_dc.select(
            instrument_src_kda, "*", require_all=True).select_trains(np.s_[:corr_ntrains])

        # Load raw images (adc), gain, memory cells, and frame numbers.
        data = seq_dc[instrument_src_kda, "data.adc"].ndarray()
        gain = seq_dc[instrument_src_kda, "data.gain"].ndarray()
        memcells = seq_dc[instrument_src_kda, "data.memoryCell"].ndarray()
        frame_number = seq_dc[instrument_src_kda, "data.frameNumber"].ndarray()

        # Validate that the selected cell ID to preview is available in the raw data.
        if memory_cells > 1:
            # For plotting, assume that memory cells are sorted the same for all trains.
            found_cells = memcells[0] == cell_id_preview
            if any(found_cells):
                cell_idx_preview = np.where(found_cells)[0][0]
            else:
                print(f"The selected cell_id_preview {cell_id_preview} is not available in burst mode. "
                      f"Previewing cell `{memcells[0]}`.")
                cell_idx_preview = 0
        else:
            cell_idx_preview = 0

        # Correct data per train
        context.map(correct_train, data)
        step_timer.done_step("Correction time.")

        step_timer.start()

        # Create CORR files and add corrected data sections.
        image_counts = seq_dc[instrument_src_kda, "data.adc"].data_counts(labelled=False)

        with DataFile(out_file, 'w') as outp_file:
            # Create INDEX datasets.
            outp_file.create_index(seq_dc.train_ids, from_file=seq_dc.files[0])

            # Create the INSTRUMENT section to later add corrected datasets.
            outp_source = outp_file.create_instrument_source(instrument_src_kda)

            # Create count/first datasets at the INDEX source.
            outp_source.create_index(data=image_counts)

            # RAW memoryCell and frameNumber are not corrected, but we store
            # only the values for the corrected trains.
            outp_source.create_key(
                "data.memoryCell", data=memcells,
                chunks=(min(chunks_ids, memcells.shape[0]), 1))
            outp_source.create_key(
                "data.frameNumber", data=frame_number,
                chunks=(min(chunks_ids, frame_number.shape[0]), 1))

            # Add the main corrected `data.adc` dataset and store corrected data.
            outp_source.create_key(
                "data.adc", data=data_corr,
                chunks=(min(chunks_data, data_corr.shape[0]), *oshape[1:]))
            outp_source.create_compressed_key(
                "data.gain", data=gain_corr)
            outp_source.create_compressed_key(
                "data.mask", data=mask_corr)
            # Temporary hotfix for FXE, which assumes this dataset is in corrected files.
            outp_source.create_key(
                "data.trainId", data=seq_dc.train_ids,
                chunks=(min(50, len(seq_dc.train_ids)),))

            save_reduced_rois(outp_file, data_corr, mask_corr, local_karabo_da)

            # Create METADATA datasets.
            outp_file.create_metadata(like=seq_dc)

        step_timer.done_step('Saving data time.')

if empty_seq == sum([len(i) for i in mapped_files.values()]):
    warning("No valid trains for RAW data to correct.")
    sys.exit(0)
```
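%% Cell type:markdown id: tags:
The `chunks_data` and `chunks_ids` parameters control the HDF5 chunk shapes of the written datasets: `data.adc` is chunked frame-wise (one full image per chunk for `chunks_data = 1`), so a single frame can be read back without touching its neighbours. A minimal plain-h5py sketch of the same scheme (the file path is illustrative):
%% Cell type:code id: tags:
``` python
with h5py.File("/tmp/chunking_demo.h5", "w") as demo_f:
    demo_arr = np.zeros((10, 512, 1024), dtype=np.float32)
    demo_ds = demo_f.create_dataset(
        "data/adc", data=demo_arr,
        chunks=(min(chunks_data, demo_arr.shape[0]), 512, 1024))
    print(demo_ds.chunks)  # (1, 512, 1024) for chunks_data = 1
```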
%% Cell type:markdown id: tags:
### Processing time summary ###
%% Cell type:code id: tags:
``` python
print(f"Total processing time {step_timer.timespan():.01f} s")
step_timer.print_summary()
```
%% Cell type:code id: tags:
``` python
if skip_plots:
    print('Skipping plots')
    sys.exit(0)
```
%% Cell type:code id: tags:
``` python
_, geom = init_jungfrau_geom(karabo_id=karabo_id, karabo_da=karabo_da)
```
%% Cell type:code id: tags:
``` python
first_seq = 0 if sequences == [-1] else sequences[0]

corrected_files = [
    out_folder / f for f in fnmatch.filter(corrected_files, f"*{run}*S{first_seq:05d}*")
]

with DataCollection.from_paths(corrected_files) as corr_dc:
    # Reading CORR data for plotting.
    jf_corr = components.JUNGFRAU(
        corr_dc,
        detector_name=karabo_id,
    ).select_trains(np.s_[:plot_trains])
    tid, jf_corr_data = next(iter(jf_corr.trains(require_all=True)))

    # Shape = [modules, trains, cells, x, y]
    # TODO: Fix the case where not all modules were requested to be corrected,
    # e.g. if only one module was corrected. An assertion error is expected
    # at `geom.plot_data_fast` while plotting corrected images.
    corrected = jf_corr.get_array("data.adc")[:, :, cell_idx_preview, ...].values
    corrected_train = jf_corr_data["data.adc"][
        :, cell_idx_preview, ...
    ].values  # lose the train axis.

    mask = jf_corr.get_array("data.mask")[:, :, cell_idx_preview, ...].values
    mask_train = jf_corr_data["data.mask"][:, cell_idx_preview, ...].values

with RunDirectory(f"{in_folder}/r{run:04d}/", f"*S{first_seq:05d}*", _use_voview=False) as raw_dc:
    # Reading RAW data for plotting.
    jf_raw = components.JUNGFRAU(raw_dc, detector_name=karabo_id).select_trains(
        np.s_[:plot_trains]
    )

    raw = jf_raw.get_array("data.adc")[:, :, cell_idx_preview, ...].values
    raw_train = (
        jf_raw.select_trains(by_id[[tid]])
        .get_array("data.adc")[:, 0, cell_idx_preview, ...]
        .values
    )

    gain = jf_raw.get_array("data.gain")[:, :, cell_idx_preview, ...].values
    gain_train_cells = (
        jf_raw.select_trains(by_id[[tid]]).get_array("data.gain")[:, :, :, ...].values
    )
```
%% Cell type:markdown id: tags:
### Mean RAW Preview
%% Cell type:code id: tags:
``` python
print(f"The per-pixel mean of the first {raw.shape[1]} trains of the first sequence file")

fig, ax = plt.subplots(figsize=(18, 10))
raw_mean = np.mean(raw, axis=1)
geom.plot_data_fast(
    raw_mean,
    ax=ax,
    vmin=min(0.75*np.median(raw_mean[raw_mean > 0]), 2000),
    vmax=max(1.5*np.median(raw_mean[raw_mean > 0]), 16000),
    cmap="jet",
    colorbar={'shrink': 1, 'pad': 0.01},
)
ax.set_title(f'{karabo_id} - Mean RAW', size=18)
plt.show()
```
%% Cell type:markdown id: tags:
### Mean CORRECTED Preview
%% Cell type:code id: tags:
``` python
print(f"The per-pixel mean of the first {corrected.shape[1]} trains of the first sequence file")

fig, ax = plt.subplots(figsize=(18, 10))

corrected_mean = np.mean(corrected, axis=1)
_corrected_vmin = min(0.75*np.median(corrected_mean[corrected_mean > 0]), -0.5)
_corrected_vmax = max(2.*np.median(corrected_mean[corrected_mean > 0]), 100)

mean_plot_kwargs = dict(
    vmin=_corrected_vmin, vmax=_corrected_vmax, cmap="jet"
)

if not strixel_sensor:
    geom.plot_data_fast(
        corrected_mean,
        ax=ax,
        colorbar={'shrink': 1, 'pad': 0.01},
        **mean_plot_kwargs
    )
else:
    ax.imshow(corrected_mean.squeeze(), aspect=10, **mean_plot_kwargs)

ax.set_title(f'{karabo_id} - Mean CORRECTED', size=18)
plt.show()
```
%% Cell type:code id: tags:
``` python
fig, ax = plt.subplots(figsize=(18, 10))

corrected_masked = corrected.copy()
corrected_masked[mask != 0] = np.nan
corrected_masked_mean = np.nanmean(corrected_masked, axis=1)
del corrected_masked

if not strixel_sensor:
    geom.plot_data_fast(
        corrected_masked_mean,
        ax=ax,
        colorbar={'shrink': 1, 'pad': 0.01},
        **mean_plot_kwargs
    )
else:
    ax.imshow(corrected_masked_mean.squeeze(), aspect=10, **mean_plot_kwargs)

ax.set_title(f'{karabo_id} - Mean CORRECTED with mask', size=18)
plt.show()
```
%% Cell type:code id: tags:
``` python
display(Markdown(f"#### A single image from train {tid}"))

fig, ax = plt.subplots(figsize=(18, 10))

single_plot_kwargs = dict(
    vmin=min(0.75 * np.median(corrected_train[corrected_train > 0]), -0.5),
    vmax=max(2.0 * np.median(corrected_train[corrected_train > 0]), 100),
    cmap="jet"
)

if not strixel_sensor:
    geom.plot_data_fast(
        corrected_train,
        ax=ax,
        colorbar={"shrink": 1, "pad": 0.01},
        **single_plot_kwargs
    )
else:
    ax.imshow(corrected_train.squeeze(), aspect=10, **single_plot_kwargs)

ax.set_title(f"{karabo_id} - CORRECTED train: {tid}", size=18)
plt.show()
```
%% Cell type:code id: tags:
``` python
def do_2d_plot(data, edges, y_axis, x_axis, title):
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    extent = [
        np.min(edges[1]),
        np.max(edges[1]),
        np.min(edges[0]),
        np.max(edges[0]),
    ]
    im = ax.imshow(
        data[::-1, :],
        extent=extent,
        aspect="auto",
        norm=LogNorm(vmin=1, vmax=np.max(data)),
    )
    ax.set_xlabel(x_axis)
    ax.set_ylabel(y_axis)
    ax.set_title(title)
    cb = fig.colorbar(im)
    cb.set_label("Counts")
```
%% Cell type:markdown id: tags:
### Gain Bit Value
%% Cell type:code id: tags:
``` python
for i, mod in enumerate(karabo_da):
    pdu = da_to_pdu[mod]
    h, ex, ey = np.histogram2d(
        raw[i].flatten(),
        gain[i].flatten(),
        bins=[100, 4],
        range=[[0, 10000], [0, 4]],
    )
    do_2d_plot(
        h,
        (ex, ey),
        "Signal (ADU)",
        "Gain Bit Value (high gain=0[00], medium gain=1[01], low gain=3[11])",
        f"Module {mod} ({pdu})",
    )
```
%% Cell type:markdown id: tags:
## Signal Distribution ##
%% Cell type:code id: tags:
``` python
for i, mod in enumerate(karabo_da):
    pdu = da_to_pdu[mod]
    fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(18, 10))
    corrected_flatten = corrected[i].flatten()
    for ax, hist_range in zip(axs, [(-100, 1000), (-1000, 10000)]):
        h = ax.hist(
            corrected_flatten,
            bins=1000,
            range=hist_range,
            log=True,
        )
        ax.set_xlabel("Signal (keV)")
        ax.set_ylabel("Counts")
        _ = ax.set_title(f'Module {mod} ({pdu})')
```
%% Cell type:markdown id: tags:
### Maximum GAIN Preview
%% Cell type:code id: tags:
``` python
display(Markdown(f"#### The per-pixel maximum of train {tid} of the GAIN data"))

fig, ax = plt.subplots(figsize=(18, 10))
gain_max = np.max(gain_train_cells, axis=(1, 2))
geom.plot_data_fast(
    gain_max,
    ax=ax,
    cmap="jet",
    colorbar={'shrink': 1, 'pad': 0.01},
)
plt.show()
```
%% Cell type:markdown id: tags:
## Bad Pixels ##
The mask contains dedicated entries for all pixels and memory cells as well as all three gain stages. Each mask entry is encoded in 32 bits as:
%% Cell type:code id: tags:
``` python
table = []
for item in BadPixels:
    table.append(
        (item.name, f"{item.value:032b}"))
md = display(Latex(tabulate.tabulate(
    table, tablefmt='latex',
    headers=["Bad pixel type", "Bit mask"])))
```
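%% Cell type:markdown id: tags:
Since each mask word is a bitwise OR of the flags above, a value can be decoded by testing each bit. A minimal sketch (`OFFSET_OUT_OF_THRESHOLD` is assumed to be one of the `BadPixels` members; `NON_STANDARD_SIZE` is used earlier in this notebook):
%% Cell type:code id: tags:
``` python
# Decode a combined mask word back into BadPixels flag names.
example_mask_value = int(BadPixels.OFFSET_OUT_OF_THRESHOLD) | int(BadPixels.NON_STANDARD_SIZE)
set_flags = [bp.name for bp in BadPixels if example_mask_value & bp.value]
print(f"{example_mask_value:032b} ->", set_flags)
```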
%% Cell type:markdown id: tags:
### Single Image Bad Pixels ###
A single-image bad pixel map for the first image of the first train
%% Cell type:code id: tags:
``` python
display(Markdown(f"#### Bad pixels image for train {tid}"))

fig, ax = plt.subplots(figsize=(18, 10))
if not strixel_sensor:
    geom.plot_data_fast(
        np.log2(mask_train),
        ax=ax,
        vmin=0, vmax=32, cmap="jet",
        colorbar={'shrink': 1, 'pad': 0.01},
    )
else:
    ax.imshow(np.log2(mask_train).squeeze(), vmin=0, vmax=32, cmap='jet', aspect=10)
plt.show()
```
......
%% Cell type:markdown id: tags:
# Jungfrau Dark Summary
Author: European XFEL Detector Department, Version: 1.0
Summary of the processed dark constants and a comparison with previously injected constants with the same conditions.
%% Cell type:code id: tags:
``` python
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/jungfrau_assembeled_dark"  # path to output to, required
metadata_folder = ""  # directory containing calibration_metadata.yml when run by xfel-calibrate.

# Parameters used to access raw data.
karabo_da = []  # list of data aggregators, which correspond to different JF modules. Only needed for single-module detectors.
karabo_id = "FXE_XAD_JF1M"  # detector identifier.

# Parameters to be used for injecting dark calibration constants.
local_output = True  # boolean indicating that local constants were stored in out_folder

# Skip the whole notebook if local_output is false in the preceding notebooks.
if not local_output:
    print('No local constants saved. Skipping summary plots')
    import sys
    sys.exit(0)
```
%% Cell type:code id: tags:
``` python
import warnings
from collections import OrderedDict
from pathlib import Path

warnings.filterwarnings('ignore')

import h5py
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import yaml
from IPython.display import Markdown, display

matplotlib.use("agg")
%matplotlib inline

from cal_tools.enums import BadPixels, JungfrauSettings
from cal_tools.ana_tools import get_range
from cal_tools.plotting import init_jungfrau_geom, show_processed_modules_jungfrau
from cal_tools.tools import CalibrationMetadata
from XFELDetAna.plotting.simpleplot import simplePlot
```
%% Cell type:code id: tags:
``` python
# Prepare paths and load previous constants' metadata.
out_folder = Path(out_folder)
metadata = CalibrationMetadata(metadata_folder or out_folder)
mod_mapping = metadata.setdefault("modules-mapping", {})
dark_constants = ["Offset", "Noise", "BadPixelsDark"]
prev_const_metadata = {}
for fn in Path(metadata_folder or out_folder).glob("module_metadata_*.yml"):
    with fn.open("r") as fd:
        fdict = yaml.safe_load(fd)
    module = fdict["module"]
    mod_mapping[module] = fdict["pdu"]
    prev_const_metadata[module] = fdict["old-constants"]
metadata.save()
```
%% Cell type:code id: tags:
``` python
expected_modules, geom = init_jungfrau_geom(
    karabo_id=karabo_id, karabo_da=karabo_da)
nmods = len(expected_modules)
```
%% Cell type:markdown id: tags:
Prepare the newly injected and the previous constants from the local folder produced in out_folder.
%% Cell type:code id: tags:
``` python
fixed_gain = False # constant is adaptive by default.
# Get the constant shape from one of the local constants.
# This is one way to realize the number of memory cells.
with h5py.File(list(out_folder.glob("const_Offset_*"))[0], 'r') as f:
    const_shape = f["data"][()].shape
    # Get fixed gain value to decide offset vmin, vmax
    # for later constant map plots.
    gain_mode = "condition/Gain mode/value"
    if gain_mode in f:
        fixed_gain = f[gain_mode][()]
initial_stacked_constants = np.full(((nmods,)+const_shape), np.nan)
curr_constants = {c: initial_stacked_constants.copy() for c in dark_constants}
prev_constants = {c: initial_stacked_constants.copy() for c in dark_constants}
excluded_constants = [] # constants excluded from comparison plots.
# Loop over modules
for cname in dark_constants:
    excluded_modules = [] # modules with no previous constants.
    for i, mod in enumerate(sorted(expected_modules)):
        # Loop over expected dark constants in out_folder.
        # Some constants can be missing in out_folder.
        pdu = mod_mapping[mod]
        # First load the new constant.
        fpath = out_folder / f"const_{cname}_{pdu}.h5"
        with h5py.File(fpath, 'r') as f:
            curr_constants[cname][i, ...] = f["data"][()]
        # Load previous constants.
        old_mod_mdata = prev_const_metadata[mod]
        if cname in old_mod_mdata: # a module can be missing from detector dark processing.
            filepath = old_mod_mdata[cname]["filepath"]
            h5path = old_mod_mdata[cname]["h5path"]
            if not filepath or not h5path:
                excluded_modules.append(mod)
                prev_constants[cname][i, ...].fill(np.nan)
            else:
                with h5py.File(filepath, "r") as fd:
                    prev_constants[cname][i, ...] = fd[f"{h5path}/data"][()]
    if excluded_modules:
        print(f"Previous {cname} constants for {excluded_modules} are not available.\n")
    # Exclude constants from comparison plots, if the corresponding
    # previous constants are not available for all modules.
    if len(excluded_modules) == nmods:
        excluded_constants.append(cname)
        print(f"No comparison plots for {cname}.\n")
```
%% Cell type:code id: tags:
``` python
display(Markdown('## Processed modules'))
processed_modules = list(mod_mapping.keys())
processed_pdus = list(mod_mapping.values())
show_processed_modules_jungfrau(
    jungfrau_geom=geom,
    constants=curr_constants,
    processed_modules=processed_modules,
    expected_modules=expected_modules,
    display_module_names=processed_pdus,
)
```
%% Cell type:code id: tags:
``` python
gainstages = 3
gain_names = ["High Gain", "Medium Gain", "Low Gain"]
const_range = {
    "Offset": [(0, 8000), (8000, 16000), (8000, 16000)],
    "Noise": [(0., 50.), (0., 50.), (0., 50.)],
    "BadPixelsDark": [(0., 5.), (0., 5.), (0., 5.)],
}
# vmin and vmax are different for Offset for fixed gain constants.
if fixed_gain:
    const_range["Offset"] = [(0, 8000), (0, 8000), (0, 8000)]
diff_const_range = {
    "Offset": [(0, 500), (0, 500), (0, 500)],
    "Noise": [(0., 5.), (0., 5.), (0., 5.)],
    "BadPixelsDark": [(0., 5.), (0., 5.), (0., 5.)],
}
percentage_range = (0, 100)
perc_const_range = {c: [percentage_range]*3 for c in dark_constants}
gs = gridspec.GridSpec(2, 4)
axes = {
    "ax0": {
        "gs": gs[0, 1:3],
        "shrink": 0.7,
        "pad": 0.05,
        "label": "ADCu",
        "title": "{}",
        "location": "right",
        "range": const_range,
    },
    "ax1": {
        "gs": gs[1, :2],
        "shrink": 0.7,
        "pad": 0.02,
        "label": "ADCu",
        "location": "left",
        "title": "Difference with previous {}",
        "range": diff_const_range,
    },
    "ax2": {
        "gs": gs[1, 2:],
        "shrink": 0.7,
        "pad": 0.02,
        "label": "%",
        "location": "right",
        "title": "Difference with previous {} %",
        "range": perc_const_range,
    },
}
```
%% Cell type:markdown id: tags:
## Summary figures across pixels and memory cells.
The following plots give an overview of calibration constants averaged across pixels and memory cells. A bad pixel mask is applied.
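%% Cell type:markdown id: tags:
As a minimal illustrative sketch (toy shapes and names, not the exact code below): bad pixels are typically masked by setting them to NaN, so that the NaN-aware averages used in this notebook ignore them.
%% Cell type:code id: tags:
``` python
import numpy as np

# Hypothetical constant stack: (modules, rows, columns, cells, gains)
const = np.random.default_rng(0).normal(1000., 10., (2, 8, 8, 2, 3))
badpix = np.zeros(const.shape, dtype=np.uint32)
badpix[0, 3, 4] = 1 # flag one pixel (any non-zero bit marks it bad)

masked = const.copy()
masked[badpix > 0] = np.nan # flagged entries drop out of NaN-aware statistics
mean_const = np.nanmean(masked, axis=(1, 2)) # average over pixels per module
```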
%% Cell type:code id: tags:
``` python
for cname, const in curr_constants.items():
    # Prepare the stacked mean of the constant,
    # the difference with the previous constant,
    # and the fraction of that difference.
    mean_const = np.nanmean(const, axis=3)
    mean_diff = np.abs(np.nanmean(const, axis=3) - np.nanmean(prev_constants[cname], axis=3))  # noqa
    mean_frac = np.abs(mean_diff / mean_const) * 100
    for gain in range(gainstages):
        data_to_plot = {
            'ax0': mean_const[..., gain],
            'ax1': mean_diff[..., gain],
            'ax2': mean_frac[..., gain],
        }
        # Plot the constant over all modules.
        display(Markdown(f'### {cname} - {gain_names[gain]}'))
        if nmods > 1:
            fig = plt.figure(figsize=(20, 20))
        else:
            fig = plt.figure(figsize=(20, 10))
        for axname, axv in axes.items():
            # Avoid difference plots if previous constants
            # are missing for the detector.
            if cname in excluded_constants and axname != "ax0":
                break
            ax = fig.add_subplot(axv["gs"])
            vmin, vmax = axv["range"][cname][gain]
            geom.plot_data(
                data_to_plot[axname],
                vmin=vmin, vmax=vmax, ax=ax,
                colorbar={
                    "shrink": axv["shrink"],
                    "pad": axv["pad"],
                    "location": axv["location"],
                }
            )
            colorbar = ax.images[0].colorbar
            colorbar.set_label(axv["label"], fontsize=15)
            colorbar.ax.tick_params(labelsize=15)
            ax.tick_params(labelsize=1)
            ax.set_title(axv["title"].format(
                f"{cname} {gain_names[gain]}"), fontsize=15)
            if axname == "ax0":
                ax.set_xlabel('Columns', fontsize=15)
                ax.set_ylabel('Rows', fontsize=15)
                ax.tick_params(labelsize=15)
            else:
                ax.tick_params(labelsize=0)
                # Remove axes labels for comparison plots.
                ax.set_xlabel('')
                ax.set_ylabel('')
        plt.show()
```
%% Cell type:code id: tags:
``` python
if curr_constants["Offset"].shape[-2] > 1:
    display(Markdown("## Summary across pixels per memory cells"))
    # Plot mean and std of memcells for each module, gain, and constant
    # across trains.
    for const_name, const in curr_constants.items():
        display(Markdown(f'### {const_name}'))
        for gain in range(gainstages):
            data = np.copy(const[..., gain])
            if const_name == 'BadPixelsDark':
                data[data > 0] = 1.0
                datamean = np.nanmean(data, axis=(1, 2))
                datamean[datamean == 1.0] = np.nan
                fig = plt.figure(
                    figsize=(15, 6),
                    tight_layout={'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})
                label = 'Fraction of bad pixels'
                ax = fig.add_subplot(1, 1, 1)
            else:
                datamean = np.nanmean(data, axis=(1, 2))
                fig = plt.figure(
                    figsize=(15, 6),
                    tight_layout={'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})
                label = f'{const_name} value [ADU], good pixels only'
                ax = fig.add_subplot(1, 2, 1)
            d = []
            for i, mod in enumerate(datamean):
                d.append({
                    'x': np.arange(mod.shape[0]),
                    'y': mod,
                    'drawstyle': 'steps-pre',
                    'label': processed_modules[i],
                })
            simplePlot(
                d, figsize=(10, 10), xrange=(-12, 510),
                x_label='Memory Cell ID',
                y_label=label,
                use_axis=ax,
                title=f'{gain_names[gain]}',
                title_position=[0.5, 1.18],
                legend='outside-top-ncol6-frame',
                legend_size='18%',
                legend_pad=0.00,
            )
            # Extra Sigma plot for Offset and Noise constants.
            if const_name != 'BadPixelsDark':
                ax = fig.add_subplot(1, 2, 2)
                label = f'$\sigma$ {const_name} [ADU], good pixels only'
                d = []
                for i, mod in enumerate(np.nanstd(data, axis=(1, 2))):
                    d.append({
                        'x': np.arange(mod.shape[0]),
                        'y': mod,
                        'drawstyle': 'steps-pre',
                        'label': processed_modules[i],
                    })
                simplePlot(
                    d, figsize=(10, 10), xrange=(-12, 510),
                    x_label='Memory Cell ID',
                    y_label=label,
                    use_axis=ax,
                    title=f'{gain_names[gain]} $\sigma$',
                    title_position=[0.5, 1.18],
                    legend='outside-top-ncol6-frame',
                    legend_size='18%',
                    legend_pad=0.00,
                )
            plt.show()
```
...
%% Cell type:markdown id: tags:
# LPD Mini Offset, Noise and Dead Pixels Characterization #
Author: M. Karnevskiy, S. Hauf
This notebook re-characterizes dark images to derive offset, noise and bad-pixel maps. All three types of constants are evaluated per pixel and per memory cell.
The notebook will correctly handle veto settings, but note that if you veto cells you will not be able to use these offsets for runs with different veto settings - vetoed cells will have zero offset.
The evaluated calibration constants are stored locally and injected into the calibration database.
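%% Cell type:markdown id: tags:
As a minimal sketch (standalone, using the constant shape of this notebook but otherwise illustrative names), cells left at zero offset can be spotted before reusing a constant for a run with different veto settings:
%% Cell type:code id: tags:
``` python
import numpy as np

# Offset constant shape used below: (cells, slow_scan, fast_scan, gain)
offset = np.zeros((512, 32, 256, 3), dtype=np.float64)
offset[:100] = 1024. # pretend only the first 100 cells were characterized

# Cells never seen in the dark runs (e.g. vetoed) keep a zero offset.
vetoed_cells = np.all(offset == 0., axis=(1, 2, 3))
print(f"{vetoed_cells.sum()} of {offset.shape[0]} cells have zero offset")
```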
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/FXE/202231/p900316/raw/" # path to input data, required
out_folder = "/gpfs/exfel/data/scratch/kluyvert/darks-lpdmini" # path to output to, required
metadata_folder = "" # Directory containing calibration_metadata.yml when run by xfel-calibrate
run_high = 10 # run number in which high gain data was recorded, required
run_med = 11 # run number in which medium gain data was recorded, required
run_low = 12 # run number in which low gain data was recorded, required
karabo_id = "FXE_DET_LPD_MINI" # karabo karabo_id
karabo_da = [''] # a list of data aggregators names with module number, e.g. 'LPDMINI00/2', Default [''] for selecting all data aggregators
source_name = "{}/DET/0CH0:xtdf" # Source name for raw detector data
control_source_name = "{}/FPGA/FEM" # Source name for control device
creation_time = "" # Override the timestamp taken from the run (format '2023-03-30 15:04:31')
cal_db_interface = "tcp://max-exfl016:8015#8025" # the database interface to use
cal_db_timeout = 300000 # timeout on caldb requests
local_output = True # output constants locally
db_output = False # output constants to database
capacitor_setting = 5 # capacitor_setting for which data was taken
bias_voltage_0 = -1 # bias voltage for minis 1, 3, 5, 7; Setting -1 will read the value from files
bias_voltage_1 = -1 # bias voltage for minis 2, 4, 6, 8; Setting -1 will read the value from files
thresholds_offset_sigma = 3. # bad pixel relative threshold in terms of n sigma offset
thresholds_offset_hard = [400, 1500] # bad pixel hard threshold
thresholds_noise_sigma = 7. # bad pixel relative threshold in terms of n sigma noise
thresholds_noise_hard = [1, 35] # bad pixel hard threshold
skip_first_ntrains = 10 # Number of first trains to skip
ntrains = 500 # number of trains to use
min_trains = 370 # minimum number of trains required for each gain stage
high_res_badpix_3d = False # plot bad-pixel summary in high resolution
test_for_normality = False # perform normality test
inject_cell_order = False # Include memory cell order as part of the detector condition
```
%% Cell type:code id: tags:
``` python
import multiprocessing
import os
import warnings
warnings.filterwarnings('ignore')
import matplotlib
import pasha as psh
import scipy.stats
from IPython.display import Latex, Markdown, display
matplotlib.use("agg")
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import tabulate
import yaml
from extra_data import RunDirectory
from iCalibrationDB import Conditions, Constants
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.simpleplot import simplePlot
from cal_tools.calcat_interface import CalCatApi
from cal_tools.enums import BadPixels
from cal_tools.plotting import plot_badpix_3d
from cal_tools.restful_config import calibration_client
from cal_tools.tools import (
calcat_creation_time,
get_from_db,
get_report,
run_prop_seq_from_path,
save_const_to_h5,
send_to_db,
)
```
%% Cell type:code id: tags:
``` python
mem_cells = 512
gain_names = ['High', 'Medium', 'Low']
const_shape = (mem_cells, 32, 256, 3) # cells, slow_scan, fast_scan, gain
gain_runs = {}
if capacitor_setting == 5:
gain_runs["high_5pf"] = run_high
gain_runs["med_5pf"] = run_med
gain_runs["low_5pf"] = run_low
elif capacitor_setting == 50:
gain_runs["high_50pf"] = run_high
gain_runs["med_50pf"] = run_med
gain_runs["low_50pf"] = run_low
capacitor_settings = [capacitor_setting]
capacitor_settings = ['{}pf'.format(c) for c in capacitor_settings]
creation_time = calcat_creation_time(in_folder, run_high, creation_time)
print(f"Using {creation_time} as creation time")
source_name = source_name.format(karabo_id)
control_source_name = control_source_name.format(karabo_id)
if -1 in {bias_voltage_0, bias_voltage_1}:
run_data = RunDirectory(os.path.join(in_folder, f"r{run_high:04d}"))
if bias_voltage_0 == -1:
bias_voltage_0 = run_data[control_source_name, 'sensorBiasVoltage0'].as_single_value(atol=5.)
if bias_voltage_1 == -1:
bias_voltage_1 = run_data[control_source_name, 'sensorBiasVoltage1'].as_single_value(atol=5.)
run, prop, seq = run_prop_seq_from_path(in_folder)
display(Markdown('## Evaluated parameters'))
print(f'CalDB Interface {cal_db_interface}')
print(f"Proposal: {prop}")
print(f"Memory cells: {mem_cells}")
print(f"Runs: {run_high}, {run_med}, {run_low}")
print(f"Using DB: {db_output}")
print(f"Input: {in_folder}")
print(f"Output: {out_folder}")
print(f"Bias voltage: {bias_voltage_0}V & {bias_voltage_1}V")
```
%% Cell type:code id: tags:
``` python
calcat = CalCatApi(client=calibration_client())
detector_id = calcat.detector(karabo_id)['id']
pdus_by_da = calcat.physical_detector_units(detector_id, pdu_snapshot_at=creation_time)
pdu_name_by_da = {da: p['physical_name'] for (da, p) in pdus_by_da.items()}
if karabo_da and karabo_da != ['']:
karabo_da = [da for da in karabo_da if da in pdu_name_by_da]
pdu_name_by_da = {da: pdu_name_by_da[da] for da in karabo_da}
else:
karabo_da = sorted(pdu_name_by_da.keys())
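# karabo_da entries look like 'LPDMINI00/2'; the trailing number is the mini module number (1-8)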
modules = [int(x.split('/')[-1]) for x in karabo_da]
```
%% Cell type:code id: tags:
``` python
print("Minis in use (1-8) and PDUs:")
for mod_num, karabo_da_m in zip(modules, karabo_da):
print(f"Mini {mod_num} -> {pdu_name_by_da[karabo_da_m]}")
```
%% Cell type:markdown id: tags:
## Data processing
%% Cell type:code id: tags:
``` python
parallel_num_procs = min(6, len(modules)*3)
parallel_num_threads = multiprocessing.cpu_count() // parallel_num_procs
# the actual characterization
def characterize_detector(run_path, gg):
run = RunDirectory(run_path, parallelize=False)
det_source = source_name.format(karabo_id)
data = run[det_source, 'image.data'].drop_empty_trains()
data = data[skip_first_ntrains : skip_first_ntrains + ntrains]
cell_ids = run[det_source, 'image.cellId'].drop_empty_trains()
cell_ids = cell_ids[skip_first_ntrains : skip_first_ntrains + ntrains]
if len(data.train_ids) < min_trains:
        raise Exception(f"Run {run_path} only contains {len(data.train_ids)} trains, but {min_trains} required")
im = data.ndarray()
if im.ndim > 3:
im = im[:, 0] # Drop extra dimension
cellid = cell_ids.ndarray()
cellid_pattern = cell_ids[0].ndarray()
if cellid.ndim > 1:
cellid = cellid[:, 0]
cellid_pattern = cellid_pattern[:, 0]
# Mask off gain bits, leaving only data
im &= 0b0000111111111111
im = im.astype(np.float32)
context = psh.context.ThreadContext(num_workers=parallel_num_threads)
# Results here should have shape (512, [<= 256], 256)
offset = context.alloc(shape=(mem_cells,) + im.shape[1:], dtype=np.float64)
noise = context.alloc(like=offset)
normal_test = context.alloc(like=offset)
def process_cell(worker_id, array_index, cc):
idx = cellid == cc
im_slice = im[idx]
if np.any(idx):
offset[cc] = np.median(im_slice, axis=0)
noise[cc] = np.std(im_slice, axis=0)
if test_for_normality:
_, normal_test[cc] = scipy.stats.normaltest(im_slice, axis=0)
context.map(process_cell, np.unique(cellid))
# bad pixels
bp = np.zeros(offset.shape, np.uint32)
# offset related bad pixels
offset_mn = np.nanmedian(offset, axis=(1, 2)).reshape(-1, 1, 1) # add some axes to make broadcasting work
offset_std = np.nanstd(offset, axis=(1, 2)).reshape(-1, 1, 1)
bp[(offset < offset_mn-thresholds_offset_sigma*offset_std) |
(offset > offset_mn+thresholds_offset_sigma*offset_std)] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value
bp[(offset < thresholds_offset_hard[0]) | (
offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value
bp[~np.isfinite(offset)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
# noise related bad pixels
noise_mn = np.nanmedian(noise, axis=(1, 2)).reshape(-1, 1, 1) # add some axes to make broadcasting work
noise_std = np.nanstd(noise, axis=(1, 2)).reshape(-1, 1, 1)
bp[(noise < noise_mn-thresholds_noise_sigma*noise_std) |
(noise > noise_mn+thresholds_noise_sigma*noise_std)] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value
bp[(noise < thresholds_noise_hard[0]) | (
noise > thresholds_noise_hard[1])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value
bp[~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
idx = (cellid == cellid[0])
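    # Sample pixel (12, 12) of each 32-row mini module (rows 12::32), first frame of each train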
return offset, noise, gg, bp, im[idx, 12::32, 12], normal_test, cellid_pattern
```
%% Cell type:code id: tags:
``` python
def modno_to_slice(m):
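    """Map mini module number (1-8) to its 32-row slice along the slow-scan axis."""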
return slice((m-1) * 32, m * 32)
```
%% Cell type:code id: tags:
``` python
offset_consts = {m: np.zeros(const_shape, dtype=np.float64) for m in modules}
noise_consts = {m: np.zeros(const_shape, dtype=np.float64) for m in modules}
badpix_consts = {m: np.zeros(const_shape, dtype=np.uint32) for m in modules}
normal_tests = {m: np.zeros(const_shape, dtype=np.float64) for m in modules}
data_samples = {m: np.full((ntrains, 3), np.nan) for m in modules} # pixel (12, 12) in first frame of each train
# Should be the same cell order for all modules & all gain stages
cellid_pattern_prev = None
gg = 0
inp = []
for gain_i, (gain, run_num) in enumerate(gain_runs.items()):
run_path = os.path.join(in_folder, f"r{run_num:04d}")
print("Process run: ", run_path)
inp.append((run_path, gain_i))
with multiprocessing.Pool(processes=parallel_num_procs) as pool:
results = pool.starmap(characterize_detector, inp)
for ir, r in enumerate(results):
offset, noise, gg, bp, data, normal, cellid_pattern = r
if cellid_pattern_prev is not None and not np.array_equal(cellid_pattern, cellid_pattern_prev):
raise ValueError("Inconsistent cell ID pattern between gain stages")
cellid_pattern_prev = cellid_pattern
# Split results up by module
for m in modules:
mod_slice = modno_to_slice(m)
offset_consts[m][..., gg] = offset[:, mod_slice]
noise_consts[m][..., gg] = noise[:, mod_slice]
badpix_consts[m][..., gg] = bp[:, mod_slice]
data_samples[m][..., gg] = data[:, m - 1]
normal_tests[m][..., gg] = normal[:, mod_slice]
print(f"{gain_names[gg]} gain. "
f"Number of processed trains per cell: {data.shape[0]}.")
```
%% Cell type:code id: tags:
``` python
# Read report path and create file location tuple to add with the injection
proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]
file_loc = 'proposal:{} runs:{} {} {}'.format(proposal, run_low, run_med, run_high)
report = get_report(metadata_folder)
```
%% Cell type:code id: tags:
``` python
# Retrieve existing constants for comparison
clist = ["Offset", "Noise", "BadPixelsDark"]
old_const = {}
old_mdata = {}
print('Retrieve pre-existing constants for comparison.')
cap = capacitor_settings[0]
for mod_num, karabo_da_m in zip(modules, karabo_da):
db_module = pdu_name_by_da[karabo_da_m]
old_const[mod_num] = {}
old_mdata[mod_num] = {}
if inject_cell_order:
mem_cell_order = ",".join([str(c) for c in cellid_pattern]) + ","
else:
mem_cell_order = None
# mod_num is from 1 to 8, so b_v_0 applies to odd numbers
bias_voltage = bias_voltage_0 if (mod_num % 2 == 1) else bias_voltage_1
condition = Conditions.Dark.LPD(
memory_cells=mem_cells,
pixels_x=256,
pixels_y=32,
bias_voltage=bias_voltage,
capacitor=cap,
memory_cell_order=mem_cell_order,
)
for const in clist:
constant = getattr(Constants.LPD, const)()
data, mdata = get_from_db(karabo_id, karabo_da_m,
constant,
condition, None,
cal_db_interface,
creation_time=creation_time,
verbosity=2, timeout=cal_db_timeout)
old_const[mod_num][const] = data
if mdata is None or data is None:
old_mdata[mod_num][const] = {
"timestamp": "Not found",
"filepath": None,
"h5path": None
}
else:
old_mdata[mod_num][const] = {
"timestamp": mdata.calibration_constant_version.begin_at.isoformat(),
"filepath": os.path.join(
mdata.calibration_constant_version.hdf5path,
mdata.calibration_constant_version.filename
),
"h5path": mdata.calibration_constant_version.h5path,
}
with open(f"{out_folder}/module_metadata_{mod_num}.yml","w") as fd:
yaml.safe_dump(
{
"module": mod_num,
"pdu": db_module,
"old-constants": old_mdata[mod_num]
}, fd)
```
%% Cell type:code id: tags:
``` python
const_types = {
'Offset': offset_consts,
'Noise': noise_consts,
'BadPixelsDark': badpix_consts,
}
```
%% Cell type:code id: tags:
``` python
# Save constants in the calibration DB
md = None
cap = capacitor_settings[0]
for mod_num, karabo_da_m in zip(modules, karabo_da):
db_module = pdu_name_by_da[karabo_da_m]
print(f"Storing constants for PDU {db_module}")
if inject_cell_order:
mem_cell_order = ",".join([str(c) for c in cellid_pattern]) + ","
else:
mem_cell_order = None
# Do not store empty constants
    # In case of 0 trains, data_samples is initialised with NaNs and never refilled.
if np.count_nonzero(~np.isnan(data_samples[mod_num]))==0:
print(f"Constant ({cap}, {mod_num}) would be empty, skipping saving")
continue
for const_name, const_dict in const_types.items():
dconst = getattr(Constants.LPD, const_name)()
dconst.data = const_dict[mod_num]
# mod_num is from 1 to 8, so b_v_0 applies to odd numbers
bias_voltage = bias_voltage_0 if (mod_num % 2 == 1) else bias_voltage_1
# set the operating condition
condition = Conditions.Dark.LPD(
memory_cells=mem_cells,
pixels_x=256,
pixels_y=32,
bias_voltage=bias_voltage,
capacitor=cap,
memory_cell_order=mem_cell_order,
)
if db_output:
md = send_to_db(db_module, karabo_id, dconst, condition,
file_loc, report_path=report,
cal_db_interface=cal_db_interface,
creation_time=creation_time,
timeout=cal_db_timeout)
if local_output:
md = save_const_to_h5(db_module, karabo_id, dconst, condition,
dconst.data, file_loc, report, creation_time, out_folder)
print(f"Calibration constant {const_name} is stored locally.\n")
print("Constants parameter conditions are:\n")
print(f"• memory_cells: {mem_cells}\n"
f"• bias_voltage: {bias_voltage}\n"
f"• capacitor: {cap}\n"
f"• memory cell order: {mem_cell_order}\n"
f"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\n")
```
%% Cell type:markdown id: tags:
## Raw pedestal distribution ##
Distribution of the pedestal (in ADU) over trains for pixel (12, 12), memory cell 12. The median of the distribution is shown in yellow and one standard deviation in red. The green line shows the average over all pixels for the given memory cell and gain stage.
%% Cell type:code id: tags:
``` python
for i in modules:
fig, grid = plt.subplots(3, 1, sharex="col", sharey="row", figsize=(10, 7))
fig.suptitle(f"Module {i}")
fig.subplots_adjust(wspace=0, hspace=0)
if np.count_nonzero(~np.isnan(data_samples[i])) == 0:
break
for gain in range(3):
data = data_samples[i][:, gain]
offset = np.nanmedian(data)
noise = np.nanstd(data)
xrange = [np.nanmin(data_samples[i]), np.nanmax(data_samples[i])]
if xrange[1] == xrange[0]:
xrange = [0, xrange[0]+xrange[0]//2]
nbins = data_samples[i].shape[0]
else:
nbins = int(xrange[1] - xrange[0])
hn, cn = np.histogram(data, bins=nbins, range=xrange)
grid[gain].hist(data, range=xrange, bins=nbins)
grid[gain].plot([offset-noise, offset-noise], [0, np.nanmax(hn)],
linewidth=1.5, color='red',
label='1 $\sigma$ deviation')
grid[gain].plot([offset+noise, offset+noise],
[0, np.nanmax(hn)], linewidth=1.5, color='red')
grid[gain].plot([offset, offset], [0, 0],
linewidth=1.5, color='y', label='median')
grid[gain].plot([np.nanmedian(offset_consts[i][:, :, 12, gain]),
np.nanmedian(offset_consts[i][:, :, 12, gain])],
[0, np.nanmax(hn)], linewidth=1.5, color='green',
label='average over pixels')
grid[gain].set_xlim(xrange)
grid[gain].set_ylim(0, np.nanmax(hn)*1.1)
grid[gain].set_xlabel("Offset value [ADU]")
grid[gain].set_ylabel("# of occurance")
if gain == 0:
leg = grid[gain].legend(
loc='upper center', ncol=3,
bbox_to_anchor=(0.1, 0.25, 0.7, 1.0))
grid[gain].text(xrange[0], np.nanmax(hn)*0.4,
"{} gain".format(gain_names[gain]), fontsize=20)
a = plt.axes([.125, .1, 0.775, .8], frame_on=False)
a.patch.set_alpha(0.05)
a.set_xlim(xrange)
plt.plot([offset, offset], [0, 1], linewidth=1.5, color='y')
plt.xticks([])
plt.yticks([])
ypos = 0.9
x1pos = (np.nanmedian(data_samples[i][:, 0]) +
np.nanmedian(data_samples[i][:, 2]))/2.
x2pos = (np.nanmedian(data_samples[i][:, 2]) +
np.nanmedian(data_samples[i][:, 1]))/2.-10
plt.annotate("", xy=(np.nanmedian(data_samples[i][:, 0]), ypos), xycoords='data',
xytext=(np.nanmedian(data_samples[i][:, 2]), ypos), textcoords='data',
arrowprops=dict(arrowstyle="<->", connectionstyle="arc3"))
plt.annotate('{}'.format(np.nanmedian(data_samples[i][:, 0])-np.nanmedian(data_samples[i][:, 2])),
xy=(x1pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')
plt.annotate("", xy=(np.nanmedian(data_samples[i][:, 2]), ypos), xycoords='data',
xytext=(np.nanmedian(data_samples[i][:, 1]), ypos), textcoords='data',
arrowprops=dict(arrowstyle="<->", connectionstyle="arc3"))
plt.annotate('{}'.format(np.nanmedian(data_samples[i][:, 2])-np.nanmedian(data_samples[i][:, 1])),
xy=(x2pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')
plt.show()
```
%% Cell type:code id: tags:
``` python
if not test_for_normality:
print('Normality test was not requested. Flag `test_for_normality` False')
else:
for i in modules:
data = np.copy(normal_tests[i])
data[badpix_consts[i] > 0] = 1.01
hn,cn = np.histogram(data[:,:,:,0], bins=100)
d = [{'x': np.arange(100)*0.01+0.01,
'y': np.histogram(data[:,:,:,0], bins=100)[0],
'drawstyle': 'steps-pre',
'label' : 'High gain',
},
{'x': np.arange(100)*0.01+0.01,
'y': np.histogram(data[:,:,:,1], bins=100)[0],
'drawstyle': 'steps-pre',
'label' : 'Medium gain',
},
{'x': np.arange(100)*0.01+0.01,
'y': np.histogram(data[:,:,:,2], bins=100)[0],
'drawstyle': 'steps-pre',
'label' : 'Low gain',
},
]
fig = plt.figure(figsize=(15,15), tight_layout={'pad': 0.5, 'w_pad': 0.3})
for gain in range(3):
ax = fig.add_subplot(2, 2, 1+gain)
heatmapPlot(data[:,:,12,gain], add_panels=False, cmap='viridis', figsize=(10,10),
y_label='Rows', x_label='Columns',
lut_label='p-Value',
use_axis=ax,
title='p-Value for cell 12, {} gain'.format(gain_names[gain]) )
ax = fig.add_subplot(2, 2, 4)
        _ = simplePlot(d, # aspect=1.6,
                       x_label="p-Value",
                       y_label="# of occurrence",
                       use_axis=ax,
                       y_log=False, legend='outside-top-ncol3-frame', legend_pad=0.05, legend_size='5%')
ax.ticklabel_format(style='sci', axis='y', scilimits=(4,6))
```
%% Cell type:raw id: tags:
.. raw:: latex
\newpage
%% Cell type:markdown id: tags:
## Single-Cell Overviews ##
Single-cell overviews make it possible to identify effects common to all memory cells, e.g. at the sensor level. They also serve as a first sanity check of expected behaviour, e.g. whether structuring at the ASIC level is visible in the offsets while no other immediate artifacts appear.
%% Cell type:code id: tags:
``` python
for gain in range(3):
display(
Markdown('### Cell-12 overview - {} gain'.format(gain_names[gain])))
fig = plt.figure(figsize=(18, 22) , tight_layout={'pad': 0.1, 'w_pad': 0.1})
for i in modules:
for iconst, const in enumerate(['Offset', 'Noise', 'BadPixelsDark']):
ax = fig.add_subplot(3, 2, 1+iconst)
data = const_types[const][i][12, :, :, gain]
vmax = 1.5 * np.nanmedian(data)
            label = f'{const} value [ADU]'
            title = f'{const} value'
if const == 'BadPixelsDark':
vmax = 4
bpix_code = data.astype(np.float32)
bpix_code[bpix_code == 0] = np.nan
title = 'Bad pixel code'
label = title
cb_labels = ['1 {}'.format(BadPixels.NOISE_OUT_OF_THRESHOLD.name),
'2 {}'.format(BadPixels.OFFSET_NOISE_EVAL_ERROR.name),
'3 {}'.format(BadPixels.OFFSET_OUT_OF_THRESHOLD.name),
'4 {}'.format('MIXED')]
heatmapPlot(bpix_code, add_panels=False, cmap='viridis',
y_label='Rows', x_label='Columns',
lut_label='', vmax=vmax, aspect=1.,
use_axis=ax, cb_ticklabels=cb_labels, cb_ticks = np.arange(4)+1, cb_loc='bottom',
title=title)
del bpix_code
else:
heatmapPlot(data, add_panels=False, cmap='viridis',
y_label='Rows', x_label='Columns',
lut_label=label, vmax=vmax, aspect=1.,
use_axis=ax, cb_loc='bottom',
title=title)
for i in modules:
for iconst, const in enumerate(['Offset', 'Noise']):
data = const_types[const][i]
dataBP = np.copy(data)
dataBP[badpix_consts[i] > 0] = -1
x_ranges = [[0, 1500], [0, 40]]
hn, cn = np.histogram(
data[:, :, :, gain], bins=100, range=x_ranges[iconst])
hnBP, cnBP = np.histogram(dataBP[:, :, :, gain], bins=cn)
d = [{'x': cn[:-1],
'y': hn,
'drawstyle': 'steps-pre',
'label': 'All data',
},
{'x': cnBP[:-1],
'y': hnBP,
'drawstyle': 'steps-pre',
'label': 'Bad pixels masked',
},
]
ax = fig.add_subplot(3, 2, 5+iconst)
_ = simplePlot(d, figsize=(5, 7), aspect=1,
x_label=f"{const} value [ADU]",
y_label="# of occurence",
title='', legend_pad=0.1, legend_size='10%',
use_axis=ax,
y_log=True, legend='outside-top-2col-frame')
plt.show()
```
%% Cell type:raw id: tags:
.. raw:: latex
\newpage
%% Cell type:code id: tags:
``` python
cols = {BadPixels.NOISE_OUT_OF_THRESHOLD.value: (BadPixels.NOISE_OUT_OF_THRESHOLD.name, '#FF000080'),
BadPixels.OFFSET_NOISE_EVAL_ERROR.value: (BadPixels.OFFSET_NOISE_EVAL_ERROR.name, '#0000FF80'),
BadPixels.OFFSET_OUT_OF_THRESHOLD.value: (BadPixels.OFFSET_OUT_OF_THRESHOLD.name, '#00FF0080'),
BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value: ('MIXED', '#DD00DD80')}
if high_res_badpix_3d:
display(Markdown("""
## Global Bad Pixel Behaviour ##
The following plots show the results of a bad pixel evaluation for all evaluated memory cells.
Cells are stacked in the Z-dimension, while pixel values in x/y are re-binned with a factor of 2.
This suppresses isolated single bad pixels.
Hence, any bad pixel region spanning at least 4 pixels in the x/y-plane, or at least two memory cells, is indicated.
Colors encode the bad pixel type, or mixed type.
"""))
# Switch rebin to 1 for full resolution and
# no interpolation for badpixel values.
rebin = 2
for gain in range(3):
display(Markdown('### Bad pixel behaviour - {} gain ###'.format(gain_names[gain])))
for mod, data in badpix_consts.items():
plot_badpix_3d(data[...,gain], cols, title='', rebin_fac=rebin)
ax = plt.gca()
leg = ax.get_legend()
leg.set(alpha=0.5)
plt.show()
```
%% Cell type:raw id: tags:
.. raw:: latex
\newpage
%% Cell type:markdown id: tags:
## Summary across tiles ##
Plots give an overview of calibration constants averaged across tiles. A bad pixel mask is applied. Constants are compared with pre-existing constants retrieved from the calibration database. Differences $\Delta$ between the old and new constants are shown.
%% Cell type:code id: tags:
``` python
time_summary = []
time_summary.append(f"The following pre-existing constants are used for comparison:")
for mod_num, mod_data in old_mdata.items():
time_summary.append(f"- Module {mod_num}")
for const, const_data in mod_data.items():
time_summary.append(f" - {const} created at {const_data['timestamp']}")
display(Markdown("\n".join(time_summary)))
```
%% Cell type:code id: tags:
``` python
for i in modules:
for gain in range(3):
display(Markdown('### Summary across tiles - {} gain'.format(gain_names[gain])))
for const in const_types:
data = np.copy(const_types[const][i][:, :, :, gain])
label = 'Fraction of bad pixels'
if const != 'BadPixelsDark':
data[badpix_consts[i][:, :, :, gain] > 0] = np.nan
label = '{} value [ADU]'.format(const)
else:
data[data>0] = 1.0
# Split tiles, making shape (mem_cells, slow_scan, tiles(=2), fast_scan)
data = data.reshape(data.shape[0], 32, 2, 128)
# Average within each tile
data = np.nanmean(data, axis=(1, 3))
fig = plt.figure(figsize=(15, 6))
ax = fig.add_subplot(1, 2, 1)
_ = heatmapPlot(data[:510, :], add_panels=True,
y_label='Memory Cell ID', x_label='Tile ID',
lut_label=label, use_axis=ax,
panel_y_label=label, panel_x_label=label,
cmap='viridis', # cb_loc='right',cb_aspect=15,
x_ticklabels=np.arange(2)+1,
x_ticks=np.arange(2)+0.5)
if old_const[i][const] is not None:
ax = fig.add_subplot(1, 2, 2)
dataold = np.copy(old_const[i][const][:, :, :, gain])
label = '$\Delta$ {}'.format(label)
if const != 'BadPixelsDark':
if old_const[i]['BadPixelsDark'] is not None:
dataold[old_const[i]['BadPixelsDark'][:, :, :, gain] > 0] = np.nan
else:
dataold[:] = np.nan
else:
dataold[dataold>0]=1.0
dataold = dataold.reshape(dataold.shape[0], 32, 2, 128)
dataold = np.nanmean(dataold, axis=(1, 3))
dataold = dataold - data
_ = heatmapPlot(dataold[:510, :], add_panels=True,
y_label='Memory Cell ID', x_label='Tile ID',
lut_label=label, use_axis=ax,
panel_y_label=label, panel_x_label=label,
cmap='viridis', # cb_loc='right',cb_aspect=15,
x_ticklabels=np.arange(2)+1,
x_ticks=np.arange(2)+0.5)
plt.show()
```
%% Cell type:raw id: tags:
.. raw:: latex
\newpage
%% Cell type:markdown id: tags:
## Variation of offset and noise across Tiles and ASICs ##
The following plots show the standard deviation $\sigma$ of the calibration constants. The plot of the standard deviation across tiles shows the pixels of one tile ($128 \times 32$); the value of each pixel is the standard deviation across the module's tiles. The standard deviation across ASICs is shown over all tiles: the plot shows the pixels of one ASIC ($16 \times 32$), where the value is the standard deviation across all ASICs of the module.
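%% Cell type:markdown id: tags:
A minimal sketch of the reshape-then-std pattern used below, with toy values (one module of $32 \times 256$ pixels, 2 tiles of 128 columns, 16 ASICs of 16 columns):
%% Cell type:code id: tags:
``` python
import numpy as np

# Toy constant for one module: (cells, slow_scan, fast_scan)
data = np.random.default_rng(0).normal(10., 1., (4, 32, 256))

# Sigma across tiles: split fast_scan into (tiles, 128), std over the tile axis.
per_tile = data.reshape(4, 32, 2, 128)
sigma_tiles = np.nanmean(np.nanstd(per_tile, axis=2), axis=0) # -> (32, 128)

# Sigma across ASICs: average over cells, split columns into (ASICs, 16), std over ASICs.
per_asic = np.nanmean(data, axis=0).reshape(32, 16, 16)
sigma_asics = np.nanstd(per_asic, axis=1) # -> (32, 16)
```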
%% Cell type:code id: tags:
``` python
for i in modules:
for gain in range(3):
display(Markdown('### Variation of offset and noise across ASICs - {} gain'.format(gain_names[gain])))
fig = plt.figure(figsize=(15, 6))
for iconst, const in enumerate(['Offset', 'Noise']):
data = np.copy(const_types[const][i][:, :, :, gain])
data[badpix_consts[i][:, :, :, gain] > 0] = np.nan
label = '$\sigma$ {} [ADU]'.format(const)
dataA = np.nanmean(data, axis=0) # average over cells
dataA = dataA.reshape(32, 16, 16)
            dataA = np.nanstd(dataA, axis=1) # std across ASICs
ax = fig.add_subplot(1, 2, 1+iconst)
_ = heatmapPlot(dataA, add_panels=True,
y_label='rows', x_label='columns',
lut_label=label, use_axis=ax,
panel_y_label=label, panel_x_label=label,
cmap='viridis'
)
plt.show()
```
%% Cell type:code id: tags:
``` python
for i in modules:
for gain in range(3):
display(Markdown('### Variation of offset and noise across tiles - {} gain'.format(gain_names[gain])))
fig = plt.figure(figsize=(15, 6))
for iconst, const in enumerate(['Offset', 'Noise']):
data = np.copy(const_types[const][i][:, :, :, gain])
data[badpix_consts[i][:, :, :, gain] > 0] = np.nan
label = '$\sigma$ {} [ADU]'.format(const)
dataT = data.reshape(data.shape[0], 32, 2, 128)
dataT = np.nanstd(dataT, axis=2)
dataT = np.nanmean(dataT, axis=0)
ax = fig.add_subplot(121+iconst)
_ = heatmapPlot(dataT, add_panels=True,
y_label='rows', x_label='columns',
lut_label=label, use_axis=ax,
panel_y_label=label, panel_x_label=label,
cmap='viridis')
plt.show()
```
%% Cell type:raw id: tags:
.. raw:: latex
\newpage
%% Cell type:markdown id: tags:
## Aggregate values and per cell behaviour ##
The following tables and plots give an overview of statistical aggregates for each constant, as well as per-cell behavior, averaged across pixels.
%% Cell type:code id: tags:
``` python
for i in modules:
for gain in range(3):
display(Markdown('### Mean over pixels - {} gain'.format(gain_names[gain])))
fig = plt.figure(figsize=(9,11))
for iconst, const in enumerate(const_types):
ax = fig.add_subplot(3, 1, 1+iconst)
data = const_types[const][i][:510,:,:,gain]
if const == 'BadPixelsDark':
data[data>0] = 1.0
dataBP = np.copy(data)
dataBP[badpix_consts[i][:510,:,:,gain] > 0] = -10
data = np.nanmean(data, axis=(1, 2))
dataBP = np.nanmean(dataBP, axis=(1, 2))
d = [{'y': data,
'x': np.arange(data.shape[0]),
'drawstyle': 'steps-mid',
'label' : 'All data'
}
]
if const != 'BadPixelsDark':
d.append({'y': dataBP,
'x': np.arange(data.shape[0]),
'drawstyle': 'steps-mid',
'label' : 'good pixels only'
})
y_title = f"{const} value [ADU]"
title = f"{const} value, {gain_names[gain]} gain"
else:
y_title = "Fraction of Bad Pixels"
title = f"Fraction of Bad Pixels, {gain_names[gain]} gain"
data_min = np.min([data, dataBP]) if const != 'BadPixelsDark' else np.min([data])
data_max = np.max([data[20:], dataBP[20:]])
data_dif = data_max - data_min
local_max = np.max([data[200:300], dataBP[200:300]])
frac = 0.35
new_max = (local_max - data_min*(1-frac))/frac
new_max = np.max([data_max, new_max])
_ = simplePlot(d, figsize=(10,10), aspect=2, xrange=(-12, 510),
x_label = 'Memory Cell ID',
y_label=y_title, use_axis=ax,
title=title,
title_position=[0.5, 1.15],
inset='xy-coord-right', inset_x_range=(0,20), inset_indicated=True,
inset_labeled=True, inset_coord=[0.2,0.5,0.6,0.95],
inset_lw = 1.0, y_range = [data_min-data_dif*0.05, new_max+data_dif*0.05],
y_log=False, legend='outside-top-ncol2-frame', legend_size='18%',
legend_pad=0.00)
plt.tight_layout(pad=1.08, h_pad=0.35)
plt.show()
```
%% Cell type:raw id: tags:
.. raw:: latex
\newpage
%% Cell type:markdown id: tags:
## Summary tables ##
The following tables show summary information for the evaluated module. Values for currently evaluated constants are compared with values for pre-existing constants retrieved from the calibration database.
%% Cell type:code id: tags:
``` python
table = []
bits = [BadPixels.NOISE_OUT_OF_THRESHOLD, BadPixels.OFFSET_OUT_OF_THRESHOLD, BadPixels.OFFSET_NOISE_EVAL_ERROR]
for mod_num in modules:
for gain in range(3):
l_data = []
l_data_old = []
data = np.copy(badpix_consts[mod_num][:,:,:,gain])
l_data.append(len(data[data>0].flatten()))
for bit in bits:
l_data.append(np.count_nonzero(badpix_consts[mod_num][:,:,:,gain] & bit.value))
if old_const[mod_num]['BadPixelsDark'] is not None:
old_const[mod_num]['BadPixelsDark'] = old_const[mod_num]['BadPixelsDark'].astype(np.uint32)
dataold = np.copy(old_const[mod_num]['BadPixelsDark'][:, :, :, gain])
l_data_old.append(len(dataold[dataold>0].flatten()))
for bit in bits:
l_data_old.append(np.count_nonzero(old_const[mod_num]['BadPixelsDark'][:, :, :, gain] & bit.value))
l_data_name = ['All bad pixels', 'NOISE_OUT_OF_THRESHOLD',
'OFFSET_OUT_OF_THRESHOLD', 'OFFSET_NOISE_EVAL_ERROR']
l_threshold = ['', f'{thresholds_noise_sigma}', f'{thresholds_offset_sigma}',
f'{thresholds_offset_hard}/{thresholds_noise_hard}']
for i in range(len(l_data)):
line = [f'{l_data_name[i]}, gain {gain_names[gain]}', l_threshold[i], l_data[i]]
if old_const[mod_num]['BadPixelsDark'] is not None:
line += [l_data_old[i]]
else:
line += ['-']
table.append(line)
table.append(['', '', '', ''])
display(Markdown('''
### Number of bad pixels ###
One pixel can be bad for several reasons; therefore, the sum over all bad pixel types can exceed the total number of bad pixels.
'''))
if len(table)>0:
md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
headers=["Pixel type", "Threshold",
"New constant", "Old constant"])))
```
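%% Cell type:markdown id: tags:
A standalone toy example of why the per-type counts above can exceed the total: a pixel carrying two flags is counted once as bad, but once per flag type.
%% Cell type:code id: tags:
``` python
import numpy as np
from cal_tools.enums import BadPixels

mask = np.zeros(10, dtype=np.uint32)
mask[0] = BadPixels.NOISE_OUT_OF_THRESHOLD.value
mask[1] = (BadPixels.NOISE_OUT_OF_THRESHOLD.value
           | BadPixels.OFFSET_OUT_OF_THRESHOLD.value) # two reasons, one pixel

total_bad = np.count_nonzero(mask > 0)
per_type = {bit.name: np.count_nonzero(mask & bit.value)
            for bit in (BadPixels.NOISE_OUT_OF_THRESHOLD,
                        BadPixels.OFFSET_OUT_OF_THRESHOLD)}
print(total_bad, per_type) # per-type counts sum to 3, total bad pixels is 2
```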
%% Cell type:code id: tags:
``` python
header = ['Parameter',
"New constant", "Old constant ",
"New constant", "Old constant ",
"New constant", "Old constant "]
for const in ['Offset', 'Noise']:
table = [['','High gain', 'High gain', 'Medium gain', 'Medium gain', 'Low gain', 'Low gain']]
for mod_num in modules:
data = np.copy(const_types[const][mod_num])
data[badpix_consts[mod_num] > 0] = np.nan
        if old_const[mod_num][const] is not None and old_const[mod_num]['BadPixelsDark'] is not None:
dataold = np.copy(old_const[mod_num][const])
dataold[old_const[mod_num]['BadPixelsDark']>0] = np.nan
f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]
n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']
for i, f in enumerate(f_list):
line = [n_list[i]]
for gain in range(3):
line.append('{:6.1f}'.format(f(data[...,gain])))
if old_const[mod_num][const] is not None and old_const[mod_num]['BadPixelsDark'] is not None:
line.append('{:6.1f}'.format(f(dataold[...,gain])))
else:
line.append('-')
table.append(line)
display(Markdown('### {} [ADU], good pixels only ###'.format(const)))
md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))
```
%% Cell type:markdown id: tags:
# LPD Mini Offline Correction #
Author: European XFEL Data Analysis Group
%% Cell type:code id: tags:
``` python
# Input parameters
in_folder = "/gpfs/exfel/exp/FXE/202321/p004576/raw/" # the folder to read data from, required
out_folder = "/gpfs/exfel/data/scratch/kluyvert/correct-lpdmini-p4576-r48" # the folder to output to, required
metadata_folder = '' # Directory containing calibration_metadata.yml when run by xfel-calibrate.
sequences = [-1] # Sequences to correct, use [-1] for all
karabo_da = [''] # Data aggregators names to correct, e.g. 'LPDMINI00/8', use [''] for all
run = 48 # run to process, required
# Source parameters
karabo_id = 'FXE_DET_LPD_MINI' # Karabo domain for detector.
input_source = '{karabo_id}/DET/0CH0:xtdf' # Input fast data source.
output_source = '{karabo_id}/CORR/0CH0:output' # Output fast data source, empty to use same as input.
control_source = '{karabo_id}/FPGA/FEM' # Control source
# CalCat parameters
creation_time = "" # The timestamp to use with Calibration DB. Required Format: "YYYY-MM-DD hh:mm:ss" e.g. 2019-07-04 11:02:41
cal_db_interface = '' # Not needed, compatibility with current webservice.
cal_db_timeout = 0 # Not needed, compatibility with current webservice.
cal_db_root = '/gpfs/exfel/d/cal/caldb_store'
# Operating conditions
bias_voltage_0 = -1 # bias voltage for minis 1, 3, 5, 7; Setting -1 will read the value from files
bias_voltage_1 = -1 # bias voltage for minis 2, 4, 6, 8; Setting -1 will read the value from files
capacitor = '5pF' # Capacitor setting: 5pF or 50pF
photon_energy = 9.3 # Photon energy in keV.
use_cell_order = False # Whether to use memory cell order as a detector condition (not stored for older constants)
# Correction parameters
offset_corr = True # Offset correction.
rel_gain = True # Gain correction based on RelativeGain constant.
ff_map = True # Gain correction based on FFMap constant.
gain_amp_map = True # Gain correction based on GainAmpMap constant.
# Output options
overwrite = True # set to True if existing data should be overwritten
chunks_data = 1 # HDF chunk size for pixel data in number of frames.
chunks_ids = 32 # HDF chunk size for cellId and pulseId datasets.
# Parallelization options
sequences_per_node = 1 # Sequence files to process per node
max_nodes = 8 # Maximum number of SLURM jobs to split correction work into
num_workers = 8 # Worker processes per node, 8 is safe on 768G nodes but won't work on 512G.
num_threads_per_worker = 32 # Number of threads per worker.
def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da, max_nodes):
from xfel_calibrate.calibrate import balance_sequences as bs
return bs(in_folder, run, sequences, sequences_per_node, karabo_da, max_nodes=max_nodes)
```
%% Cell type:code id: tags:
``` python
from pathlib import Path
from time import perf_counter
import re
import warnings
from IPython.display import Markdown
import numpy as np
import h5py
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
%matplotlib inline
from calibration_client.modules import CalibrationConstantVersion
import extra_data as xd
import extra_geom as xg
import pasha as psh
from cal_tools.calcat_interface import CalCatApi
from cal_tools.lpdalgs import correct_lpd_frames
from cal_tools.lpdlib import get_mem_cell_order
from cal_tools.tools import CalibrationMetadata, calcat_creation_time
from cal_tools.files import DataFile
from cal_tools.restful_config import calibration_client
```
%% Cell type:markdown id: tags:
# Prepare environment
%% Cell type:code id: tags:
``` python
file_re = re.compile(r'^RAW-R(\d{4})-(\w+\d+)-S(\d{5})$') # This should probably move to cal_tools
run_folder = Path(in_folder) / f'r{run:04d}'
out_folder = Path(out_folder)
out_folder.mkdir(exist_ok=True)
output_source = output_source or input_source
input_source = input_source.format(karabo_id=karabo_id)
output_source = output_source.format(karabo_id=karabo_id)
control_source = control_source.format(karabo_id=karabo_id)
cal_db_root = Path(cal_db_root)
creation_time = calcat_creation_time(in_folder, run, creation_time)
print(f'Using {creation_time.isoformat()} as creation time')
# Pick all sequences or those selected.
if not sequences or sequences == [-1]:
do_sequence = lambda seq: True
else:
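    # Membership test via a bound method: do_sequence(n) is True when sequence n was requested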
do_sequence = [int(x) for x in sequences].__contains__
# List of detector sources.
det_inp_sources = [input_source.format(karabo_id=karabo_id)]
mem_cells = 512
```
%% Cell type:code id: tags:
``` python
if -1 in {bias_voltage_0, bias_voltage_1}:
run_data = xd.RunDirectory(Path(in_folder, f"r{run:04d}"))
if bias_voltage_0 == -1:
bias_voltage_0 = run_data[control_source, 'sensorBiasVoltage0'].as_single_value(atol=5.)
if bias_voltage_1 == -1:
bias_voltage_1 = run_data[control_source, 'sensorBiasVoltage1'].as_single_value(atol=5.)
print(f"Using bias voltages {bias_voltage_0}V & {bias_voltage_1}V")
```
%% Cell type:markdown id: tags:
# Select data to process
%% Cell type:code id: tags:
``` python
calcat_client = calibration_client()
calcat = CalCatApi(client=calcat_client)
# Look up PDUs
detector_id = calcat.detector(karabo_id)['id']
pdus_by_da = calcat.physical_detector_units(detector_id, pdu_snapshot_at=creation_time)
modnos_from_db = set()
if not karabo_da or karabo_da == ['']:
karabo_da = sorted(pdus_by_da.keys())
else:
karabo_da = sorted(set(karabo_da) & pdus_by_da.keys())
print("Modules to correct:", karabo_da)
```
%% Cell type:code id: tags:
``` python
data_to_process = []
data_agg_names = {kda.split('/')[0] for kda in karabo_da}
for inp_path in run_folder.glob('RAW-*.h5'):
match = file_re.match(inp_path.stem)
if match[2] not in data_agg_names or not do_sequence(int(match[3])):
continue
outp_path = out_folder / 'CORR-R{run:04d}-{aggregator}-S{seq:05d}.h5'.format(
run=int(match[1]), aggregator=match[2], seq=int(match[3]))
data_to_process.append((inp_path, outp_path))
print('Files to process:')
for inp_path, _ in sorted(data_to_process):
print(inp_path.name)
```
%% Cell type:markdown id: tags:
# Obtain and prepare calibration constants
%% Cell type:code id: tags:
``` python
const_data = {}
retrieved_consts = {} # To be recorded in YAML file
const_load_mp = psh.ProcessContext(num_workers=24)
module_const_shape = (mem_cells, 32, 256, 3) # cells, slow_scan, fast_scan, gain
# Retrieve constants from CALCAT.
dark_calibrations = {
14: 'BadPixelsDark' # np.uint32
}
if offset_corr:
dark_calibrations[1] = 'Offset' # np.float32
base_condition = [
# Bias voltage added below as it differs by module
dict(parameter_id=15, value=capacitor), # Feedback capacitor
dict(parameter_id=7, value=mem_cells), # Memory cells
dict(parameter_id=13, value=256), # Pixels X
dict(parameter_id=14, value=32), # Pixels Y
]
if use_cell_order:
# Read the order of memory cells used
raw_data = xd.DataCollection.from_paths([e[0] for e in data_to_process])
cell_ids_pattern_s = get_mem_cell_order(raw_data, det_inp_sources)
print("Memory cells order:", cell_ids_pattern_s)
dark_condition = base_condition + [
dict(parameter_id=30, value=cell_ids_pattern_s), # Memory cell order
]
else:
dark_condition = base_condition.copy()
illuminated_calibrations = {}
if rel_gain:
illuminated_calibrations[44] = 'RelativeGain' # np.float32
if ff_map:
illuminated_calibrations[43] = 'FFMap' # np.float32
illuminated_calibrations[20] = 'BadPixelsFF' # np.uint32
if gain_amp_map:
illuminated_calibrations[42] = 'GainAmpMap' # np.float32
illuminated_condition = base_condition + [
dict(parameter_id=3, value=photon_energy), # Source energy
]
```
%% Cell type:code id: tags:
``` python
print('Querying calibration database', end='', flush=True)
start = perf_counter()
for calibrations, condition in [
    (dark_calibrations, dark_condition),
    (illuminated_calibrations, illuminated_condition)
]:
    if not calibrations:
        continue

    for karabo_da_m in karabo_da:
        mod_num = int(karabo_da_m.split('/')[-1])
        # mod_num runs from 1 to 8, so bias_voltage_0 applies to the
        # odd-numbered modules and bias_voltage_1 to the even-numbered ones.
        bias_voltage = bias_voltage_0 if mod_num % 2 == 1 else bias_voltage_1
        condition_w_voltage = [dict(parameter_id=1, value=bias_voltage)] + condition

        resp = CalibrationConstantVersion.get_closest_by_time_by_detector_conditions(
            calcat_client, karabo_id, list(calibrations.keys()),
            {'parameters_conditions_attributes': condition_w_voltage},
            karabo_da=karabo_da_m, event_at=creation_time.isoformat()
        )

        if not resp['success']:
            raise RuntimeError(resp)

        for ccv in resp['data']:
            cc = ccv['calibration_constant']
            calibration_name = calibrations[cc['calibration_id']]

            mod_const_metadata = retrieved_consts.setdefault(karabo_da_m, {
                'physical-name': ccv['physical_detector_unit']['physical_name']
            })
            mod_const_metadata.setdefault('constants', {})[calibration_name] = {
                "path": str(cal_db_root / ccv['path_to_file'] / ccv['file_name']),
                "dataset": ccv['data_set_name'],
                "creation-time": ccv["begin_validity_at"],
                "ccv-id": ccv["id"],
            }

            dtype = np.uint32 if calibration_name.startswith('BadPixels') else np.float32
            const_data[(mod_num, calibration_name)] = dict(
                path=Path(ccv['path_to_file']) / ccv['file_name'],
                dataset=ccv['data_set_name'],
                data=const_load_mp.alloc(shape=module_const_shape, dtype=dtype)
            )
        print('.', end='', flush=True)
total_time = perf_counter() - start
print(f'{total_time:.1f}s')
```
%% Cell type:code id: tags:
``` python
lines = []
for karabo_da_m, mod_const_metadata in retrieved_consts.items():
    lines.append(f"- {karabo_da_m} ({mod_const_metadata['physical-name']})")
    for const_name, d in mod_const_metadata['constants'].items():
        url = f"https://in.xfel.eu/calibration/calibration_constant_versions/{d['ccv-id']}"
        lines.append(f"  - {const_name}: [{d['creation-time']}]({url})")
Markdown('\n'.join(lines))
```
%% Cell type:code id: tags:
``` python
CalibrationMetadata(metadata_folder or out_folder).add_fragment({
    "retrieved-constants": retrieved_consts
})
```
%% Cell type:code id: tags:
``` python
def load_constant_dataset(wid, index, const_descr):
    ccv_entry = const_data[const_descr]

    with h5py.File(cal_db_root / ccv_entry['path'], 'r') as fp:
        fp[ccv_entry['dataset'] + '/data'].read_direct(ccv_entry['data'])

    print('.', end='', flush=True)
print('Loading calibration data', end='', flush=True)
start = perf_counter()
const_load_mp.map(load_constant_dataset, list(const_data.keys()))
total_time = perf_counter() - start
print(f'{total_time:.1f}s')
```
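%% Cell type:markdown id: tags:
The `pasha` (`psh`) pattern above allocates the destination arrays in shared memory before the worker processes fork, so every worker writes directly into the same buffers and nothing has to be pickled back. A minimal, self-contained sketch of that pattern (the names here are illustrative only):
%% Cell type:code id: tags:
``` python
import numpy as np
import pasha as psh

ctx = psh.ProcessContext(num_workers=4)
squares = ctx.alloc(shape=(8,), dtype=np.float64)  # shared between workers

def fill(wid, index, value):
    # Kernels receive (worker_id, index, value) for each mapped item.
    squares[index] = value ** 2

ctx.map(fill, range(8))
print(squares)  # [ 0.  1.  4.  9. 16. 25. 36. 49.]
```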
%% Cell type:code id: tags:
``` python
module_nums = sorted({n for n, _ in const_data})
nmods = len(module_nums)
const_type_names = {t for _, t in const_data}

const_shape = (mem_cells, 32 * nmods, 256, 3)  # cells, slow_scan, fast_scan, gain
const_slices = [slice(i * 32, (i+1) * 32) for i in range(nmods)]
raw_data_slices = [slice((n-1) * 32, n * 32) for n in module_nums]

def _assemble_constant(arr, calibration_name):
    for mod_num, sl in zip(module_nums, const_slices):
        arr[:, sl] = const_data[mod_num, calibration_name]['data']

offset_const = np.zeros(const_shape, dtype=np.float32)
if offset_corr:
    _assemble_constant(offset_const, 'Offset')

mask_const = np.zeros(const_shape, dtype=np.uint32)
_assemble_constant(mask_const, 'BadPixelsDark')

gain_const = np.ones(const_shape, dtype=np.float32)
if rel_gain:
    _assemble_constant(gain_const, 'RelativeGain')

if ff_map:
    ff_map_gain = np.ones(const_shape, dtype=np.float32)
    _assemble_constant(ff_map_gain, 'FFMap')
    gain_const *= ff_map_gain

if 'BadPixelsFF' in const_type_names:
    badpix_ff = np.zeros(const_shape, dtype=np.uint32)
    _assemble_constant(badpix_ff, 'BadPixelsFF')
    mask_const |= badpix_ff

if gain_amp_map:
    # Use a distinct name to avoid shadowing the gain_amp_map flag.
    gain_amp_const = np.zeros(const_shape, dtype=np.float32)
    _assemble_constant(gain_amp_const, 'GainAmpMap')
    gain_const *= gain_amp_const
```
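%% Cell type:markdown id: tags:
To make the two slice lists concrete: `const_slices` packs the selected modules densely into the stacked constant arrays, while `raw_data_slices` addresses each module's 32 rows inside the full raw frame. A small worked example, assuming modules 2 and 5 were selected (demo copies only, so the real variables above are left untouched):
%% Cell type:code id: tags:
``` python
demo_nums = [2, 5]  # hypothetical module selection
demo_const_slices = [slice(i * 32, (i + 1) * 32) for i in range(len(demo_nums))]
demo_raw_slices = [slice((n - 1) * 32, n * 32) for n in demo_nums]

print(demo_const_slices)  # [slice(0, 32, None), slice(32, 64, None)]
print(demo_raw_slices)    # [slice(32, 64, None), slice(128, 160, None)]
# i.e. raw rows 32:64 (module 2) map to constant rows 0:32,
# and raw rows 128:160 (module 5) map to constant rows 32:64.
```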
%% Cell type:code id: tags:
``` python
def correct_file(wid, index, work):
    inp_path, outp_path = work

    start = perf_counter()
    dc = xd.H5File(inp_path, inc_suspect_trains=False).select('*', 'image.*', require_all=True)
    inp_source = dc[input_source]
    open_time = perf_counter() - start

    # Load raw data for this file.
    # Reshaping gets rid of the extra 1-len dimensions without
    # mangling the frame axis for an actual frame count of 1.
    start = perf_counter()
    in_raw = inp_source['image.data'].ndarray()
    if in_raw.ndim > 3:
        in_raw = in_raw[:, 0]
    in_cell = inp_source['image.cellId'].ndarray().reshape(-1)
    in_pulse = inp_source['image.pulseId'].ndarray().reshape(-1)
    read_time = perf_counter() - start

    # Slice modules from input data
    data_shape = (in_raw.shape[0], nmods * 32, 256)
    in_sliced = np.zeros(data_shape, dtype=in_raw.dtype)
    for i, sl in enumerate(raw_data_slices):
        in_sliced[:, i*32:(i+1)*32] = in_raw[..., sl, :]

    output_shape = (data_shape[0], nmods, 32, 256)

    # Allocate output arrays.
    out_data = np.zeros(in_sliced.shape, dtype=np.float32)
    out_gain = np.zeros(in_sliced.shape, dtype=np.uint8)
    out_mask = np.zeros(in_sliced.shape, dtype=np.uint32)

    start = perf_counter()
    correct_lpd_frames(in_sliced, in_cell,
                       out_data, out_gain, out_mask,
                       offset_const, gain_const, mask_const,
                       num_threads=num_threads_per_worker)
    correct_time = perf_counter() - start

    image_counts = inp_source['image.data'].data_counts(labelled=False)

    start = perf_counter()
    if (not outp_path.exists() or overwrite) and image_counts.sum() > 0:
        with DataFile(outp_path, 'w') as outp_file:
            outp_file.create_index(dc.train_ids, from_file=dc.files[0])
            outp_file.create_metadata(like=dc, instrument_channels=(f'{output_source}/image',))
            outp_source = outp_file.create_instrument_source(output_source)

            outp_source.create_index(image=image_counts)
            outp_source.create_key('image.cellId', data=in_cell,
                                   chunks=(min(chunks_ids, in_cell.shape[0]),))
            outp_source.create_key('image.pulseId', data=in_pulse,
                                   chunks=(min(chunks_ids, in_pulse.shape[0]),))
            outp_source.create_key('image.data', data=out_data.reshape(output_shape),
                                   chunks=(min(chunks_data, out_data.shape[0]), 1, 32, 256))
            outp_source.create_compressed_key('image.gain', data=out_gain.reshape(output_shape))
            outp_source.create_compressed_key('image.mask', data=out_mask.reshape(output_shape))
    write_time = perf_counter() - start

    total_time = open_time + read_time + correct_time + write_time
    frame_rate = in_raw.shape[0] / total_time

    m = file_re.match(inp_path.stem)
    seq = int(m[3]) if m else -1

    print('{}\t{}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{}\t{:.1f}'.format(
        wid, seq, open_time, read_time, correct_time, write_time, total_time,
        in_raw.shape[0], frame_rate))

    in_raw = None
    in_cell = None
    in_pulse = None
    out_data = None
    out_gain = None
    out_mask = None
print('worker\tseq\topen\tread\tcorrect\twrite\ttotal\tframes\trate')
start = perf_counter()
psh.ProcessContext(num_workers=num_workers).map(correct_file, data_to_process)
total_time = perf_counter() - start
print(f'Total time: {total_time:.1f}s')
```
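%% Cell type:markdown id: tags:
`correct_lpd_frames` is a compiled extension running multithreaded over frames. Conceptually, for every pixel it looks up the gain stage, subtracts the matching offset, divides by the combined gain constant and copies the bad-pixel mask. A rough, single-threaded NumPy sketch of that logic (assumptions: the gain stage sits in bits 12-13 of the 16-bit raw word with the 12-bit ADC value below it, and cell IDs index the constant arrays directly; the real routine also flags invalid gain values):
%% Cell type:code id: tags:
``` python
def correct_frames_sketch(raw, cells, offset, gain, mask):
    # raw: (frames, ss, fs) uint16; constants: (cells, ss, fs, 3).
    stage = ((raw >> 12) & 0x3).astype(np.intp)
    adc = (raw & 0xFFF).astype(np.float32)

    out = np.empty(raw.shape, np.float32)
    out_mask = np.empty(raw.shape, np.uint32)

    for f, c in enumerate(cells):
        g = np.minimum(stage[f], 2)[..., None]  # (ss, fs, 1) gain index
        o = np.take_along_axis(offset[c], g, axis=-1)[..., 0]
        r = np.take_along_axis(gain[c], g, axis=-1)[..., 0]
        m = np.take_along_axis(mask[c], g, axis=-1)[..., 0]
        out[f] = (adc[f] - o) / r
        out_mask[f] = m

    return out, stage.astype(np.uint8), out_mask
```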
%% Cell type:markdown id: tags:
# Data preview for first train
%% Cell type:code id: tags:
``` python
# This geometry is arbitrary, we just want to show all the modules
geom = xg.LPD_MiniGeometry.from_module_positions(
    [(0, i * 40) for i in range(nmods)]
)
output_paths = [outp_path for _, outp_path in data_to_process if outp_path.exists()]
dc = xd.H5File(sorted(output_paths)[0]).select_trains(np.s_[0])
det = dc[output_source.format(karabo_id=karabo_id)]
data = det['image.data'].ndarray()
```
%% Cell type:markdown id: tags:
### Intensity histogram across all cells
%% Cell type:code id: tags:
``` python
left_edge_ratio = 0.01
right_edge_ratio = 0.99
fig, ax = plt.subplots(num=1, clear=True, figsize=(15, 6))
values, bins, _ = ax.hist(np.ravel(data), bins=2000, range=(-1500, 2000))
def find_nearest_index(array, value):
    return (np.abs(array - value)).argmin()

cum_values = np.cumsum(values)
vmin = bins[find_nearest_index(cum_values, cum_values[-1]*left_edge_ratio)]
vmax = bins[find_nearest_index(cum_values, cum_values[-1]*right_edge_ratio)]

max_value = values.max()
ax.vlines([vmin, vmax], 0, max_value, color='red', linewidth=5, alpha=0.2)
ax.text(vmin, max_value, f'{left_edge_ratio*100:.0f}%',
        color='red', ha='center', va='bottom', size='large')
ax.text(vmax, max_value, f'{right_edge_ratio*100:.0f}%',
        color='red', ha='center', va='bottom', size='large')
ax.text(vmax+(vmax-vmin)*0.01, max_value/2, 'Colormap interval',
        color='red', rotation=90, ha='left', va='center', size='x-large')

ax.set_xlim(vmin-(vmax-vmin)*0.1, vmax+(vmax-vmin)*0.1)
ax.set_ylim(0, max_value*1.1)
pass
```
%% Cell type:markdown id: tags:
### First memory cell
%% Cell type:code id: tags:
``` python
fig, ax = plt.subplots(num=2, figsize=(15, 15), clear=True, nrows=1, ncols=1)
geom.plot_data_fast(data[0], ax=ax, vmin=vmin, vmax=vmax)
pass
```
%% Cell type:markdown id: tags:
### Train average
%% Cell type:code id: tags:
``` python
fig, ax = plt.subplots(num=3, figsize=(15, 15), clear=True, nrows=1, ncols=1)
geom.plot_data_fast(data.mean(axis=0), ax=ax, vmin=vmin, vmax=vmax)
pass
```
%% Cell type:markdown id: tags:
### Lowest gain stage per pixel
%% Cell type:code id: tags:
``` python
# Gain stage values: 0 = high, 1 = medium, 2 = low gain. The maximum over
# all frames is thus the lowest gain stage each pixel reached.
lowest_gain_stage = det['image.gain'].ndarray().max(axis=0)

fig, ax = plt.subplots(num=4, figsize=(15, 15), clear=True, nrows=1, ncols=1)
geom.plot_data_fast(lowest_gain_stage, ax=ax, vmin=0, vmax=2)

cb = ax.images[0].colorbar
cb.set_ticks([0, 1, 2])
cb.set_ticklabels(['High gain', 'Medium gain', 'Low gain'])
```
@@ -64,7 +64,7 @@ install_requires = [
     "dynaconf==3.1.4",
     "env_cache==0.1",
     "extra_data==1.12.0",
-    "extra_geom==1.8.0",
+    "extra_geom==1.10.0",
     "gitpython==3.1.0",
     "h5py==3.5.0",
     "iminuit==1.3.8",
...
"""Interfaces to calibration constant data.""" """Interfaces to calibration constant data."""
import re
import socket
from datetime import date, datetime, time, timezone from datetime import date, datetime, time, timezone
from functools import lru_cache from functools import lru_cache
from os import getenv
from pathlib import Path from pathlib import Path
from weakref import WeakKeyDictionary from weakref import WeakKeyDictionary
...@@ -367,6 +364,7 @@ class CalibrationData: ...@@ -367,6 +364,7 @@ class CalibrationData:
calibrations = set() calibrations = set()
default_client = None default_client = None
_default_caldb_root = ...
def __init__( def __init__(
self, self,
...@@ -375,6 +373,7 @@ class CalibrationData: ...@@ -375,6 +373,7 @@ class CalibrationData:
client=None, client=None,
event_at=None, event_at=None,
module_naming="da", module_naming="da",
caldb_root=None,
): ):
"""Initialize a new CalibrationData object. """Initialize a new CalibrationData object.
...@@ -397,6 +396,8 @@ class CalibrationData: ...@@ -397,6 +396,8 @@ class CalibrationData:
integers in karabo_da. integers in karabo_da.
`qm`: QxMx naming convention is used. Virtual names for `qm`: QxMx naming convention is used. Virtual names for
AGIPD, DSSC, and LPD. AGIPD, DSSC, and LPD.
caldb_root (str or None): Path to the root directory for caldb
files, finds folder for production caldb by default.
**condition_params: Operating condition parameters defined **condition_params: Operating condition parameters defined
on an instance level. on an instance level.
""" """
...@@ -406,6 +407,10 @@ class CalibrationData: ...@@ -406,6 +407,10 @@ class CalibrationData:
self.event_at = event_at self.event_at = event_at
self.pdu_snapshot_at = event_at self.pdu_snapshot_at = event_at
self.module_naming = module_naming self.module_naming = module_naming
if caldb_root is None:
self.caldb_root = self._get_default_caldb_root()
else:
self.caldb_root = Path(caldb_root)
if client is None: if client is None:
...@@ -486,29 +491,19 @@ class CalibrationData: ...@@ -486,29 +491,19 @@ class CalibrationData:
) )
return CalibrationData.default_client return CalibrationData.default_client
@property @staticmethod
def caldb_root(self): def _get_default_caldb_root():
"""Root directory for calibration constant data. if CalibrationData._default_caldb_root is ...:
onc_path = Path("/common/cal/caldb_store")
Returns: maxwell_path = Path("/gpfs/exfel/d/cal/caldb_store")
(Path or None) Location of caldb store or if onc_path.is_dir():
None if not available. CalibrationData._default_caldb_root = onc_path
""" elif maxwell_path.is_dir():
CalibrationData._default_caldb_root = maxwell_path
if not hasattr(CalibrationData, "_caldb_root"):
if getenv("SASE"):
# ONC
CalibrationData._caldb_root = Path("/common/cal/caldb_store")
elif re.match(r"^max-(.+)\.desy\.de$", socket.getfqdn()):
# Maxwell
CalibrationData._caldb_root = Path(
"/gpfs/exfel/d/cal/caldb_store"
)
else: else:
# Probably unavailable CalibrationData._default_caldb_root = None
CalibrationData._caldb_root = None
return CalibrationData._caldb_root return CalibrationData._default_caldb_root
@property @property
def client(self): def client(self):
...@@ -947,6 +942,7 @@ class AGIPD_CalibrationData(SplitConditionCalibrationData): ...@@ -947,6 +942,7 @@ class AGIPD_CalibrationData(SplitConditionCalibrationData):
gain_setting=None, gain_setting=None,
gain_mode=None, gain_mode=None,
module_naming="da", module_naming="da",
caldb_root=None,
integration_time=12, integration_time=12,
source_energy=9.2, source_energy=9.2,
pixels_x=512, pixels_x=512,
...@@ -958,6 +954,7 @@ class AGIPD_CalibrationData(SplitConditionCalibrationData): ...@@ -958,6 +954,7 @@ class AGIPD_CalibrationData(SplitConditionCalibrationData):
client, client,
event_at, event_at,
module_naming, module_naming,
caldb_root,
) )
self.sensor_bias_voltage = sensor_bias_voltage self.sensor_bias_voltage = sensor_bias_voltage
...@@ -1021,6 +1018,7 @@ class LPD_CalibrationData(SplitConditionCalibrationData): ...@@ -1021,6 +1018,7 @@ class LPD_CalibrationData(SplitConditionCalibrationData):
client=None, client=None,
event_at=None, event_at=None,
module_naming="da", module_naming="da",
caldb_root=None,
): ):
super().__init__( super().__init__(
detector_name, detector_name,
...@@ -1028,6 +1026,7 @@ class LPD_CalibrationData(SplitConditionCalibrationData): ...@@ -1028,6 +1026,7 @@ class LPD_CalibrationData(SplitConditionCalibrationData):
client, client,
event_at, event_at,
module_naming, module_naming,
caldb_root,
) )
self.sensor_bias_voltage = sensor_bias_voltage self.sensor_bias_voltage = sensor_bias_voltage
...@@ -1072,6 +1071,7 @@ class DSSC_CalibrationData(CalibrationData): ...@@ -1072,6 +1071,7 @@ class DSSC_CalibrationData(CalibrationData):
client=None, client=None,
event_at=None, event_at=None,
module_naming="da", module_naming="da",
caldb_root=None,
): ):
super().__init__( super().__init__(
detector_name, detector_name,
...@@ -1079,6 +1079,7 @@ class DSSC_CalibrationData(CalibrationData): ...@@ -1079,6 +1079,7 @@ class DSSC_CalibrationData(CalibrationData):
client, client,
event_at, event_at,
module_naming, module_naming,
caldb_root,
) )
self.sensor_bias_voltage = sensor_bias_voltage self.sensor_bias_voltage = sensor_bias_voltage
...@@ -1126,6 +1127,7 @@ class JUNGFRAU_CalibrationData(CalibrationData): ...@@ -1126,6 +1127,7 @@ class JUNGFRAU_CalibrationData(CalibrationData):
client=None, client=None,
event_at=None, event_at=None,
module_naming="da", module_naming="da",
caldb_root=None,
): ):
super().__init__( super().__init__(
detector_name, detector_name,
...@@ -1133,6 +1135,7 @@ class JUNGFRAU_CalibrationData(CalibrationData): ...@@ -1133,6 +1135,7 @@ class JUNGFRAU_CalibrationData(CalibrationData):
client, client,
event_at, event_at,
module_naming, module_naming,
caldb_root,
) )
self.sensor_bias_voltage = sensor_bias_voltage self.sensor_bias_voltage = sensor_bias_voltage
...@@ -1193,6 +1196,7 @@ class PNCCD_CalibrationData(SplitConditionCalibrationData): ...@@ -1193,6 +1196,7 @@ class PNCCD_CalibrationData(SplitConditionCalibrationData):
client=None, client=None,
event_at=None, event_at=None,
module_naming="da", module_naming="da",
caldb_root=None,
): ):
# Ignore modules for this detector. # Ignore modules for this detector.
super().__init__( super().__init__(
...@@ -1201,6 +1205,7 @@ class PNCCD_CalibrationData(SplitConditionCalibrationData): ...@@ -1201,6 +1205,7 @@ class PNCCD_CalibrationData(SplitConditionCalibrationData):
client, client,
event_at, event_at,
module_naming, module_naming,
caldb_root,
) )
self.sensor_bias_voltage = sensor_bias_voltage self.sensor_bias_voltage = sensor_bias_voltage
...@@ -1249,6 +1254,7 @@ class EPIX100_CalibrationData(SplitConditionCalibrationData): ...@@ -1249,6 +1254,7 @@ class EPIX100_CalibrationData(SplitConditionCalibrationData):
client=None, client=None,
event_at=None, event_at=None,
module_naming="da", module_naming="da",
caldb_root=None,
): ):
# Ignore modules for this detector. # Ignore modules for this detector.
super().__init__( super().__init__(
...@@ -1257,6 +1263,7 @@ class EPIX100_CalibrationData(SplitConditionCalibrationData): ...@@ -1257,6 +1263,7 @@ class EPIX100_CalibrationData(SplitConditionCalibrationData):
client, client,
event_at, event_at,
module_naming, module_naming,
caldb_root,
) )
self.sensor_bias_voltage = sensor_bias_voltage self.sensor_bias_voltage = sensor_bias_voltage
...@@ -1299,6 +1306,7 @@ class GOTTHARD2_CalibrationData(CalibrationData): ...@@ -1299,6 +1306,7 @@ class GOTTHARD2_CalibrationData(CalibrationData):
client=None, client=None,
event_at=None, event_at=None,
module_naming="da", module_naming="da",
caldb_root=None,
): ):
# Ignore modules for this detector. # Ignore modules for this detector.
super().__init__( super().__init__(
...@@ -1307,6 +1315,7 @@ class GOTTHARD2_CalibrationData(CalibrationData): ...@@ -1307,6 +1315,7 @@ class GOTTHARD2_CalibrationData(CalibrationData):
client, client,
event_at, event_at,
module_naming, module_naming,
caldb_root,
) )
self.sensor_bias_voltage = sensor_bias_voltage self.sensor_bias_voltage = sensor_bias_voltage
......
@@ -415,7 +415,7 @@ def init_jungfrau_geom(
     karabo_da: List[str]
 ) -> Tuple[List[str], JUNGFRAUGeometry]:
     """ Initiate JUNGFRAUGeometry object based on the selected detector
-    (SPB_IRDA_JF4M, FXE_XAD_JF1M, or a single module detector).
+    (JF4M, JF1M, or JF500K detectors).

     :param karabo_id: the detector identifier of an expected multimodular
         detector or a single module detector.
@@ -429,7 +429,7 @@ def init_jungfrau_geom(
     mod_width = (256 * 4) + (2 * 3)  # inc. 2px gaps between tiles
     mod_height = (256 * 2) + 2

-    if karabo_id == "SPB_IRDA_JF4M":
+    if "JF4M" in karabo_id:
         nmods = 8
         expected_modules = [f"JNGFR{i:02d}" for i in range(1, nmods+1)]
         # The first 4 modules are rotated 180 degrees relative to the others.
@@ -445,12 +445,12 @@ def init_jungfrau_geom(
         ]
         orientations = [
             (-1, -1) for _ in range(4)] + [(1, 1) for _ in range(4)]
-    elif karabo_id == "FXE_XAD_JF1M":
+    elif "JF1M" in karabo_id:
         nmods = 2
         expected_modules = [f"JNGFR{i:02d}" for i in range(1, nmods+1)]
         module_pos = ((-mod_width//2, 33), (-mod_width//2, -mod_height-33))
         orientations = [(-1, -1), (1, 1)]
-    else:
+    else:  # e.g. HED_IA1_JF500K1, FXE_XAD_JF500K, FXE_XAD_JFHZ
         nmods = 1
         expected_modules = karabo_da
         module_pos = ((-mod_width//2, -mod_height//2),)
...
@@ -98,6 +98,19 @@ notebooks = {
                 "cluster cores": 1},
         }
     },
+    "LPDMINI": {
+        "DARK": {
+            "notebook": "notebooks/LPDMini/LPD_Mini_Char_Darks_NBC.ipynb",
+            "concurrency": {"parameter": None},
+        },
+        "CORRECT": {
+            "notebook": "notebooks/LPDMini/LPD_Mini_Correct.ipynb",
+            "concurrency": {"parameter": "sequences",
+                            "default concurrency": [-1],
+                            "use function": "balance_sequences",
+                            "cluster cores": 16},
+        },
+    },
     "PNCCD": {
         "DARK": {
             "notebook": "notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb",
...
@@ -1029,8 +1029,15 @@ class ActionsServer:
             dconfig = data_conf[karabo_id]

             # check for files according to mapping in raw run dir.
+            # data-mapping for LPD mini uses karabo-da names like
+            # LPDMINI00/8 to identify individual modules. The /8 is not
+            # part of the file name
+            data_agg_names = {
+                kda.split('/')[0] for kda in dconfig['karabo-da']
+            }
+
             if any(y in x for x in fl
-                   for y in dconfig['karabo-da']):
+                   for y in data_agg_names):
                 thisconf = copy.copy(dconfig)
                 if isinstance(pconf[karabo_id], dict):
                     thisconf.update(copy.copy(pconf[karabo_id]))
...
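The comment in the hunk above is the key change: LPD Mini karabo-da entries carry a module suffix (`LPDMINI00/8`) that never appears in raw file names, so run-directory matching must use the aggregator part only. For illustration (hypothetical values):

``` python
karabo_da = ['LPDMINI00/2', 'LPDMINI00/5']
data_agg_names = {kda.split('/')[0] for kda in karabo_da}
print(data_agg_names)  # {'LPDMINI00'}

# A raw file name contains only the aggregator part:
fl = ['RAW-R0012-LPDMINI00-S00000.h5']
print(any(y in x for x in fl for y in data_agg_names))  # True
```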