Commit c92872fd authored by Karim Ahmed, committed by Cyril Danilevski

Set gain_setting to 0 for old JF runs

parent 3c06a7c0
%% Cell type:markdown id: tags:
# Jungfrau Offline Correction #
Author: European XFEL Detector Group, Version: 0.1
Offline Calibration for the Jungfrau Detector
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/CALLAB/202031/p900113/raw"  # the folder to read data from, required
out_folder = "/gpfs/exfel/data/scratch/hammerd/issue-242"  # the folder to output to, required
sequences = [-1]  # sequences to correct, set to [-1] for all, range allowed
run = 9979  # run to process, required

karabo_id = "SPB_IRDA_JF4M"  # karabo prefix of Jungfrau devices
karabo_da = ['JNGFR01']  # data aggregators
receiver_id = "JNGFR{:02d}"  # inset for receiver devices
receiver_control_id = "CONTROL"  # inset for control devices
path_template = 'RAW-R{:04d}-{}-S{:05d}.h5'  # template to use for file name
h5path = '/INSTRUMENT/{}/DET/{}:daqOutput/data'  # path in H5 file under which images are located
h5path_run = '/RUN/{}/DET/{}'  # path to run data
h5path_cntrl = '/CONTROL/{}/DET/{}'  # path to control data
karabo_id_control = ""  # if control is on a different ID; set to empty string if it is the same as karabo_id
karabo_da_control = "JNGFRCTRL00"  # file inset for control data

use_dir_creation_date = True  # use the creation date of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8017#8025"  # the database interface to use
cal_db_timeout = 180000  # timeout on caldb requests

overwrite = True  # set to True if existing data should be overwritten
no_relative_gain = False  # do not do relative gain correction
bias_voltage = 180  # will be overwritten by value in file
sequences_per_node = 5  # number of sequence files per cluster node if run as SLURM job, set to 0 to not run SLURM parallel
photon_energy = 9.2  # photon energy in keV
chunk_size_idim = 1  # chunking size of imaging dimension, adjust if user software is sensitive to this.
integration_time = 4.96  # integration time in us, will be overwritten by value in file
gain_setting = 0  # 0 for dynamic gain, 1 for dynamic HG0, will be overwritten by value in file
mem_cells = 0  # leave memory cells equal to 0, as it is saved in control information starting in 2019.
db_module = ["Jungfrau_M275"]  # ID of module in calibration database
manual_slow_data = False  # if true, use manually entered bias_voltage and integration_time values
chunk_size = 0


def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):
    from xfel_calibrate.calibrate import balance_sequences as bs
    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)
```
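%% Cell type:markdown id: tags:
For reference, the templates above are plain `str.format` patterns: `path_template` is filled with run number, data aggregator and sequence number, while `h5path` is filled in two stages with `karabo_id` and the receiver inset. A minimal illustration (the printed results are what the parameters above produce; this cell is explanatory only):
%% Cell type:code id: tags:
``` python
# Illustration only: how the templates above expand, mirroring the
# two-stage formatting used further down in this notebook.
print(path_template.format(run, karabo_da[0], 0))
# -> RAW-R9979-JNGFR01-S00000.h5
print(h5path.format(karabo_id, receiver_id).format(int(karabo_da[0][-2:])))
# -> /INSTRUMENT/SPB_IRDA_JF4M/DET/JNGFR01:daqOutput/data
```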
%% Cell type:code id: tags:
``` python
import copy
import multiprocessing
import time
import warnings
from functools import partial
from pathlib import Path

import h5py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tabulate
from cal_tools.enums import BadPixels
from cal_tools.tools import (
    get_constant_from_db_and_time,
    get_dir_creation_date,
    map_modules_from_folder,
)
from iCalibrationDB import Conditions, Constants
from IPython.display import Latex, display
from matplotlib.colors import LogNorm

warnings.filterwarnings('ignore')

matplotlib.use('agg')
%matplotlib inline
```
%% Cell type:code id: tags:
``` python
in_folder = Path(in_folder)
out_folder = Path(out_folder)
ped_dir = in_folder / f'r{run:04d}'
h5path = h5path.format(karabo_id, receiver_id)

if out_folder.exists() and not overwrite:
    raise AttributeError("Output path exists! Exiting")
else:
    out_folder.mkdir(parents=True, exist_ok=True)

fp_name_contr = path_template.format(run, karabo_da_control, 0)
fp_path_contr = ped_dir / fp_name_contr

if sequences[0] == -1:
    sequences = None

print(f"Run is: {run}")
print(f"HDF5 path: {h5path}")
print(f"Process modules: {karabo_da}")

creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run)
    print(f"Using {creation_time} as creation time")

if karabo_id_control == "":
    karabo_id_control = karabo_id
```
%% Cell type:code id: tags:
``` python
def check_memory_cells(file_name, path):
    with h5py.File(file_name, 'r') as f:
        t_stamp = np.array(f[path + '/storageCells/timestamp'])
        st_cells = np.array(f[path + '/storageCells/value'])
        sc_start = np.array(f[path + '/storageCellStart/value'])

    valid_train = t_stamp > 0
    n_scs = st_cells[valid_train][0] + 1
    sc_s = sc_start[valid_train][0]

    return n_scs, sc_s
```
%% Cell type:code id: tags:
``` python
# set everything up filewise
mapped_files, mod_ids, total_sequences, sequences_qm, _ = map_modules_from_folder(
    in_folder, run, path_template, karabo_da, sequences
)

print(f"Processing a total of {total_sequences} sequence files")

table = []
fi = 0
if total_sequences > 0:  # create table
    for i, key in enumerate(mapped_files):
        for k, f in enumerate(list(mapped_files[key].queue)):
            if k == 0:
                table.append((fi, karabo_da[i], k, f))
            else:
                table.append((fi, "", k, f))
            fi += 1
    md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
                                         headers=["#", "module", "# module", "file"])))

# restore the queue
mapped_files, mod_ids, total_sequences, sequences_qm, _ = map_modules_from_folder(
    in_folder, run, path_template, karabo_da, sequences
)
```
%% Cell type:code id: tags:
``` python
if not manual_slow_data:
    with h5py.File(fp_path_contr, 'r') as f:
        run_path = h5path_run.format(karabo_id_control, receiver_control_id)
        integration_time = float(f[f'{run_path}/exposureTime/value'][()]*1e6)
        bias_voltage = int(np.squeeze(f[f'{run_path}/vHighVoltage/value'])[0])
        try:
            gain_s = f[f'/RUN/{karabo_id_control}/DET/CONTROL/settings/value'][0].decode()
        except KeyError:
            print(
                "ERROR: gain_setting is not available for h5 ctrl path "
                f"/RUN/{karabo_id_control}/DET/CONTROL/settings/value,\nfor file: {fp_path_contr}. \n"
                "WARNING: Setting gain_setting to 0, assuming that this is an old run.\n"
            )
            gain_s = "KeyError"
        gain_setting = 1 if gain_s == 'dynamichg0' else 0  # gain_s == 'dynamicgain'

control_path = h5path_cntrl.format(karabo_id_control, receiver_control_id)
try:
    this_run_mcells, sc_start = check_memory_cells(fp_path_contr, control_path)
    if this_run_mcells == 1:
        memory_cells = 1
        print(f'Dark runs in single cell mode\n storage cell start: {sc_start:02d}')
    else:
        memory_cells = 16
        print(f'Dark runs in burst mode\n storage cell start: {sc_start:02d}')
except Exception as e:
    if "Unable to open object" in str(e):
        if mem_cells == 0:
            memory_cells = 1
        else:
            memory_cells = mem_cells
        print(f'Set memory cells to {memory_cells} as it is not saved in control information.')
    else:
        print(f"Error trying to access memory cell from control information: {e}")

print(f"Integration time is {integration_time} us")
print(f"Gain setting is {gain_setting}")
print(f"Bias voltage is {bias_voltage} V")
print(f"Number of memory cells is {memory_cells}")
```
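%% Cell type:markdown id: tags:
The mapping from the control-system `settings` string to `gain_setting` above is deliberately binary: only `'dynamichg0'` yields 1, and every other value, including the `'KeyError'` placeholder assigned to old runs that lack this key, falls back to 0. A small illustration with toy values (this cell is explanatory only):
%% Cell type:code id: tags:
``` python
# Toy illustration of the gain_setting fallback introduced by this commit.
for gain_s in ('dynamicgain', 'dynamichg0', 'KeyError'):
    gain_setting_example = 1 if gain_s == 'dynamichg0' else 0
    print(f'{gain_s!r:>14} -> gain_setting = {gain_setting_example}')
# 'dynamicgain' -> 0, 'dynamichg0' -> 1, 'KeyError' (old run) -> 0
```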
%% Cell type:code id: tags:
``` python
condition = Conditions.Dark.jungfrau(
    memory_cells=memory_cells,
    bias_voltage=bias_voltage,
    integration_time=integration_time,
    gain_setting=gain_setting,
)


def get_constants_for_module(karabo_da: str):
    """Get calibration constants for a given module of Jungfrau.

    :return:
        offset_map (offset map),
        mask (mask of bad pixels),
        gain_map (map of relative gain factors),
        db_module (name of DB module),
        when (dictionary: constant - creation time)
    """
    when = {}
    retrieval_function = partial(
        get_constant_from_db_and_time,
        karabo_id=karabo_id,
        karabo_da=karabo_da,
        condition=condition,
        cal_db_interface=cal_db_interface,
        creation_time=creation_time,
        timeout=cal_db_timeout,
    )
    offset_map, when["Offset"] = retrieval_function(
        constant=Constants.jungfrau.Offset(), empty_constant=np.zeros((1024, 512, 1, 3))
    )
    mask, when["BadPixelsDark"] = retrieval_function(
        constant=Constants.jungfrau.BadPixelsDark(),
        empty_constant=np.zeros((1024, 512, 1, 3)),
    )
    mask_ff, when["BadPixelsFF"] = retrieval_function(
        constant=Constants.jungfrau.BadPixelsFF(),
        empty_constant=None
    )
    gain_map, when["Gain"] = retrieval_function(
        constant=Constants.jungfrau.RelativeGain(),
        empty_constant=None
    )

    # combine masks
    if mask_ff is not None:
        mask |= np.moveaxis(mask_ff, 0, 1)

    # move from x,y,cell,gain to cell,x,y,gain
    offset_map = np.squeeze(offset_map)
    mask = np.squeeze(mask)
    if memory_cells > 1:
        offset_map = np.moveaxis(np.moveaxis(offset_map, 0, 2), 0, 2)
        mask = np.moveaxis(np.moveaxis(mask, 0, 2), 0, 2)

    if gain_map is not None:
        if memory_cells > 1:
            gain_map = np.moveaxis(np.moveaxis(gain_map, 0, 2), 0, 1)
        else:
            gain_map = np.squeeze(gain_map)
            gain_map = np.moveaxis(gain_map, 1, 0)

    return offset_map, mask, gain_map, karabo_da, when


with multiprocessing.Pool() as pool:
    r = pool.map(get_constants_for_module, karabo_da)

constants = {}
for offset_map, mask, gain_map, k_da, when in r:
    print(f'Constants for module {k_da}:')
    for const in when:
        print(f' {const} injected at {when[const]}')
    if gain_map is None:
        print(" No gain map found")
        no_relative_gain = True
    constants[k_da] = (offset_map, mask, gain_map)
```
%% Cell type:code id: tags:
``` python
def copy_and_sanitize_non_cal_data(infile, outfile, h5base):
    """Copy and sanitize data from `infile` that is not calibrated."""
    h5base = h5base.lstrip("/")
    dont_copy = ["adc", ]
    dont_copy = [f'{h5base}/{dnc}' for dnc in dont_copy]

    def visitor(k, item):
        if k not in dont_copy:
            if isinstance(item, h5py.Group):
                outfile.create_group(k)
            elif isinstance(item, h5py.Dataset):
                group = str(k).split("/")
                group = "/".join(group[:-1])
                infile.copy(k, outfile[group])

    infile.visititems(visitor)
```
%% Cell type:code id: tags:
``` python
# Correct a chunk of images for offset and gain
def correct_chunk(offset_map, mask, gain_map, memory_cells, no_relative_gain, inp):
    fim_data = None
    gim_data = None
    rim_data = None
    msk_data = None
    err = ''
    try:
        d, g, m, ind, copy_sample = inp
        g[g == 3] = 2

        if copy_sample and ind == 0:
            if memory_cells == 1:
                rim_data = np.squeeze(copy.copy(d))
            else:
                rim_data = np.squeeze(copy.copy(d[:, 0, ...]))

        # Select memory cells
        if memory_cells > 1:
            m[m > 16] = 0
            offset_map_cell = offset_map[m, ...]
            mask_cell = mask[m, ...]
        else:
            offset_map_cell = offset_map
            mask_cell = mask

        # Offset correction
        offset = np.choose(g, (offset_map_cell[..., 0], offset_map_cell[..., 1], offset_map_cell[..., 2]))
        d -= offset

        # Gain correction
        if not no_relative_gain:
            if memory_cells > 1:
                gain_map_cell = gain_map[m, ...]
            else:
                gain_map_cell = gain_map
            cal = np.choose(g, (gain_map_cell[..., 0], gain_map_cell[..., 1], gain_map_cell[..., 2]))
            d /= cal

        msk = np.choose(g, (mask_cell[..., 0], mask_cell[..., 1], mask_cell[..., 2]))

        # Store sample of data for plotting
        if copy_sample and ind == 0:
            if memory_cells == 1:
                fim_data = np.squeeze(copy.copy(d))
                gim_data = np.squeeze(copy.copy(g))
                msk_data = np.squeeze(copy.copy(msk))
            else:
                fim_data = np.squeeze(copy.copy(d[:, 1, ...]))
                gim_data = np.squeeze(copy.copy(g[:, 1, ...]))
                msk_data = np.squeeze(copy.copy(msk[:, 1, ...]))

    except Exception as e:
        err = e

    return ind, d, msk, rim_data, fim_data, gim_data, msk_data, err
```
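%% Cell type:markdown id: tags:
`np.choose` above performs the per-pixel selection of calibration constants: the gain-bit array `g` (0, 1 or 2 after mapping 3 to 2) picks, element by element, from the three per-stage maps. A toy-sized sketch of the same selection with made-up numbers (explanatory only, not part of the correction):
%% Cell type:code id: tags:
``` python
# Toy illustration of the per-pixel gain-stage selection used in correct_chunk.
import numpy as np

g_toy = np.array([[0, 1],
                  [2, 0]])                            # gain bit per pixel
offset_toy = np.stack([np.full((2, 2), 10.),          # stage 0 offsets
                       np.full((2, 2), 20.),          # stage 1 offsets
                       np.full((2, 2), 30.)],         # stage 2 offsets
                      axis=-1)                        # shape (2, 2, 3)
offset_sel = np.choose(g_toy, (offset_toy[..., 0],
                               offset_toy[..., 1],
                               offset_toy[..., 2]))
print(offset_sel)  # [[10. 20.]
                   #  [30. 10.]]
```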
%% Cell type:code id: tags:
``` python
fim_data = {}
gim_data = {}
rim_data = {}
msk_data = {}

# For each module, chunks will be processed by pool
pool = multiprocessing.Pool()

# Loop over modules
for local_karabo_da, mapped_files_module in zip(karabo_da, mapped_files.values()):
    h5path_f = h5path.format(int(local_karabo_da[-2:]))

    # Loop over sequences for given module
    for sequence_file_number, sequence_file in enumerate(mapped_files_module.queue):
        sequence_file = Path(sequence_file)
        offset_map, mask, gain_map = constants[local_karabo_da]

        with h5py.File(sequence_file, 'r') as infile:
            # The processed files are saved here in a folder with the run name.
            out_filename = out_folder / sequence_file.name.replace("RAW", "CORR")
            print(f'Process file: {sequence_file}, with path {h5path_f}')
            try:
                with h5py.File(out_filename, "w") as outfile:
                    copy_and_sanitize_non_cal_data(infile, outfile, h5path_f)

                    oshape = infile[h5path_f + "/adc"].shape
                    print(f'Data shape: {oshape}')
                    if not oshape[0]:
                        raise ValueError(f"No image data: shape {oshape}")

                    # Chunk always contains >= 1 complete image
                    chunk_shape = (chunk_size_idim, 1) + oshape[-2:]

                    ddset = outfile.create_dataset(h5path_f + "/adc",
                                                   oshape,
                                                   chunks=chunk_shape,
                                                   dtype=np.float32)

                    mskset = outfile.create_dataset(h5path_f + "/mask",
                                                    oshape,
                                                    chunks=chunk_shape,
                                                    dtype=np.uint32,
                                                    compression="gzip", compression_opts=1, shuffle=True)

                    # Parallelize over chunks of images
                    inp = []
                    max_ind = oshape[0]
                    ind = 0

                    # If the chunk size is not given, a maximum of 12+1 chunks is expected
                    if chunk_size == 0:
                        chunk_size = max_ind // 12
                        print(f'Chunk size: {chunk_size}')

                    ts = time.time()
                    while ind < max_ind:
                        d = infile[h5path_f + "/adc"][ind:ind + chunk_size, ...].astype(np.float32)
                        g = infile[h5path_f + "/gain"][ind:ind + chunk_size, ...]

                        if h5path_f + "/memoryCell" in infile:
                            m = infile[h5path_f + "/memoryCell"][ind:ind + chunk_size, ...]
                        else:
                            m = None
                        print(f'To process: {d.shape}')
                        inp.append((d, g, m, ind, sequence_file_number == 0))
                        ind += chunk_size

                    print('Preparation time: ', time.time() - ts)
                    ts = time.time()

                    print(f'Run {len(inp)} processes')
                    p = partial(correct_chunk, offset_map, mask, gain_map, memory_cells, no_relative_gain)

                    r = pool.map(p, inp)

                    if sequence_file_number == 0:
                        (_, _, _,
                         rim_data[local_karabo_da], fim_data[local_karabo_da],
                         gim_data[local_karabo_da], msk_data[local_karabo_da], _) = r[0]

                    print('Correction time: ', time.time() - ts)
                    ts = time.time()

                    for rr in r:
                        ind, cdata, cmask, _, _, _, _, err = rr
                        data_size = cdata.shape[0]
                        ddset[ind:ind + data_size, ...] = cdata
                        mskset[ind:ind + data_size, ...] = cmask
                        if err != '':
                            print(f'Error: {err}')

                    print('Saving time: ', time.time() - ts)
            except Exception as e:
                print(f"Error: {e}")

pool.close()
```
%% Cell type:code id: tags:
``` python
def do_2d_plot(data, edges, y_axis, x_axis, title):
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    extent = [np.min(edges[1]), np.max(edges[1]), np.min(edges[0]), np.max(edges[0])]
    im = ax.imshow(data[::-1, :], extent=extent, aspect="auto", norm=LogNorm(vmin=1, vmax=np.max(data)))
    ax.set_xlabel(x_axis)
    ax.set_ylabel(y_axis)
    ax.set_title(title)
    cb = fig.colorbar(im)
    cb.set_label("Counts")
```
%% Cell type:code id: tags:
``` python
for mod in rim_data:
    h, ex, ey = np.histogram2d(rim_data[mod].flatten(),
                               gim_data[mod].flatten(),
                               bins=[100, 4],
                               range=[[0, 10000], [0, 4]])
    do_2d_plot(h, (ex, ey), "Signal (ADU)", "Gain Bit Value", f'Module {mod}')
```
%% Cell type:markdown id: tags:
### Mean RAW Preview ###
The per-pixel mean of the sequence file of RAW data
%% Cell type:code id: tags:
``` python
for mod in rim_data:
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    im = ax.imshow(np.mean(rim_data[mod], axis=0),
                   vmin=min(0.75 * np.median(rim_data[mod][rim_data[mod] > 0]), 2000),
                   vmax=max(1.5 * np.median(rim_data[mod][rim_data[mod] > 0]), 16000), cmap="jet")
    ax.set_title(f'Module {mod}')
    cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
### Mean CORRECTED Preview ###
The per-pixel mean of the sequence file of CORR data
%% Cell type:code id: tags:
``` python
for mod in rim_data:
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    im = ax.imshow(np.mean(fim_data[mod], axis=0),
                   vmin=min(0.75 * np.median(fim_data[mod][fim_data[mod] > 0]), -0.5),
                   vmax=max(2. * np.median(fim_data[mod][fim_data[mod] > 0]), 100), cmap="jet")
    ax.set_title(f'Module {mod}', size=18)
    cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
### Single Train Preview ###
A single image from the first train
%% Cell type:code id: tags:
``` python
for mod in rim_data:
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    im = ax.imshow(fim_data[mod][0, ...],
                   vmin=min(0.75 * np.median(fim_data[mod][0, ...]), -0.5),
                   vmax=max(2. * np.median(fim_data[mod][0, ...]), 100), cmap="jet")
    ax.set_title(f'Module {mod}', size=18)
    cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
## Signal Distribution ##
%% Cell type:code id: tags:
``` python
for mod in rim_data:
    fig = plt.figure(figsize=(20, 10))

    ax = fig.add_subplot(211)
    h = ax.hist(fim_data[mod].flatten(), bins=1000, range=(-100, 1000), log=True)
    l = ax.set_xlabel("Signal (keV)")
    l = ax.set_ylabel("Counts")
    _ = ax.set_title(f'Module {mod}')

    ax = fig.add_subplot(212)
    h = ax.hist(fim_data[mod].flatten(), bins=1000, range=(-1000, 10000), log=True)
    l = ax.set_xlabel("Signal (keV)")
    l = ax.set_ylabel("Counts")
    _ = ax.set_title(f'Module {mod}')
```
%% Cell type:markdown id: tags:
### Maximum GAIN Preview ###
The per-pixel maximum of the first train of the GAIN data
%% Cell type:code id: tags:
``` python
for mod in rim_data:
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    im = ax.imshow(np.max(gim_data[mod], axis=0), vmin=0,
                   vmax=3, cmap="jet")
    ax.set_title(f'Module {mod}', size=18)
    cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
## Bad Pixels ##
The mask contains dedicated entries for all pixels and memory cells as well as all three gain stages. Each mask entry is encoded in 32 bits as:
%% Cell type:code id: tags:
``` python
table = []
for item in BadPixels:
    table.append((item.name, f"{item.value:032b}"))
md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=["Bad pixel type", "Bit mask"])))
```
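%% Cell type:markdown id: tags:
Since a mask entry is the bitwise OR of the flags above, a pixel's value can be decoded back into its individual bad-pixel types. A small sketch (`decode_mask_entry` and the example value are illustrative, not part of the correction):
%% Cell type:code id: tags:
``` python
# Decode one (arbitrary, illustrative) 32-bit mask entry into its flags.
def decode_mask_entry(value):
    return [item.name for item in BadPixels if value & item.value]

example = BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value
print(f"{example:032b} -> {decode_mask_entry(example)}")
```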
%% Cell type:markdown id: tags:
### Single Image Bad Pixels ###
A single image bad pixel map for the first image of the first train
%% Cell type:code id: tags:
``` python
for mod in rim_data:
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    im = ax.imshow(np.log2(msk_data[mod][0, ...]), vmin=0, vmax=32, cmap="jet")
    ax.set_title(f'Module {mod}', size=18)
    cb = fig.colorbar(im, ax=ax)
```
......
%% Cell type:markdown id: tags:
# Jungfrau Dark Image Characterization #
Version: 0.1, Author: M. Ramilli, S. Hauf
Analyzes Jungfrau dark image data to deduce offset, noise and resulting bad pixel maps
%% Cell type:code id: tags:
``` python
cluster_profile = 'noDB'  # the ipcluster profile name
in_folder = '/gpfs/exfel/exp/SPB/202130/p900204/raw/'  # folder under which runs are located, required
out_folder = '/gpfs/exfel/data/scratch/ahmedk/jftest_dark/'  # path to place reports at, required
run_high = 141  # run number for G0 dark run, required
run_med = 142  # run number for G1 dark run, required
run_low = 143  # run number for G2 dark run, required

karabo_da = ['JNGFR01', 'JNGFR02', 'JNGFR03', 'JNGFR04', 'JNGFR05', 'JNGFR06', 'JNGFR07', 'JNGFR08']  # list of data aggregators, which correspond to different JF modules
karabo_id = "SPB_IRDA_JF4M"  # karabo_id (detector identifier) prefix of the Jungfrau detector to process.
karabo_id_control = ""  # if control is on a different ID; set to empty string if it is the same as karabo_id
receiver_id = 'JNGFR{:02}'  # inset for receiver devices
receiver_control_id = "CONTROL"  # inset for control devices
path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5'  # template to use for file name, double escape sequence number
h5path = '/INSTRUMENT/{}/DET/{}:daqOutput/data'  # path in H5 file under which images are located
h5path_run = '/RUN/{}/DET/{}'  # path to run data
h5path_cntrl = '/CONTROL/{}/DET/{}'  # path to control data
karabo_da_control = "JNGFRCTRL00"  # file inset for control data

use_dir_creation_date = True  # use dir creation date
cal_db_interface = 'tcp://max-exfl016:8016'  # calibrate db interface to connect to
cal_db_timeout = 300000  # timeout on caldb requests
local_output = True  # output constants locally
db_output = False  # output constants to database

integration_time = 1000  # integration time in us, will be overwritten by value in file
gain_setting = 0  # 0 for dynamic, forceswitchg1, forceswitchg2; 1 for dynamichg0, fixgain1, fixgain2. Will be overwritten by value in file
bias_voltage = 90  # sensor bias voltage in V, will be overwritten by value in file
badpixel_threshold_sigma = 5.  # bad pixels defined by values outside n times this std from median
offset_abs_threshold_low = [1000, 10000, 10000]  # absolute bad pixel threshold in terms of offset, lower values
offset_abs_threshold_high = [8000, 15000, 15000]  # absolute bad pixel threshold in terms of offset, upper values
chunkSize = 10  # iteration chunk size, needs to match or be less than number of images in a sequence file
imageRange = [0, 500]  # image range in which to evaluate
memoryCells = 16  # number of memory cells

db_module = ['Jungfrau_M275', "Jungfrau_M035", 'Jungfrau_M273', 'Jungfrau_M203', 'Jungfrau_M221', 'Jungfrau_M267']  # ID of module in calibration database
manual_slow_data = False  # if true, use manually entered bias_voltage and integration_time values
time_limits = 0.025  # to find calibration constants later on, the integration time is allowed to vary by this margin (in us)
operation_mode = ''  # Detector operation mode, optional
```
%% Cell type:code id: tags:
``` python
import glob
import os
import warnings

warnings.filterwarnings('ignore')

import h5py
import matplotlib
from h5py import File as h5file

matplotlib.use('agg')
import matplotlib.pyplot as plt

%matplotlib inline
import numpy as np

from cal_tools.ana_tools import save_dict_to_hdf5
from cal_tools.enums import BadPixels
from cal_tools.tools import (
    get_dir_creation_date,
    get_pdu_from_db,
    get_random_db_interface,
    get_report,
    save_const_to_h5,
    send_to_db,
)
from iCalibrationDB import Conditions, Constants, Detectors, Versions
from XFELDetAna.util import env

env.iprofile = cluster_profile

from XFELDetAna.detectors.jungfrau import reader as jfreader
from XFELDetAna.detectors.jungfrau import readerPSI as jfreaderPSI
from XFELDetAna.detectors.jungfrau.jf_chunk_reader import JFChunkReader
from XFELDetAna.detectors.jungfrau.util import (
    count_n_files,
    rollout_data,
    sanitize_data_cellid,
)
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.histogram import histPlot
```
%% Cell type:code id: tags:
``` python
path_inset = karabo_da[0]  # karabo_da is a concurrency parameter
receiver_id = receiver_id.format(int(path_inset[-2:]))
proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]
file_loc = 'proposal:{} runs:{} {} {}'.format(proposal, run_high, run_med, run_low)
report = get_report(out_folder)

os.makedirs(out_folder, exist_ok=True)

# TODO
# this trick is needed until proper mapping is introduced
if len(db_module) > 1:
    # TODO: SPB JF Hack till using all modules.
    if karabo_id == "SPB_IRDA_JNGFR" and int(path_inset[-2:]) > 5:
        db_module = db_module[int(path_inset[-2:]) - 3]
    else:
        db_module = db_module[int(path_inset[-2:]) - 1]
else:
    db_module = db_module[0]

# Constants relevant for the analysis
run_nums = [run_high, run_med, run_low]  # run number for G0/HG0, G1, G2
sensorSize = [1024, 512]
blockSize = [1024, 512]
xRange = [0, 0 + sensorSize[0]]
yRange = [0, 0 + sensorSize[1]]
gains = [0, 1, 2]

h5path = h5path.format(karabo_id, receiver_id)

creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run_high)
    print("Using {} as creation time".format(creation_time))

cal_db_interface = get_random_db_interface(cal_db_interface)
print('Calibration database interface: {}'.format(cal_db_interface))

offset_abs_threshold = [offset_abs_threshold_low, offset_abs_threshold_high]

if karabo_id_control == "":
    karabo_id_control = karabo_id

print('Path inset ', path_inset)
print('Receiver Id ', receiver_id)
```
%% Cell type:code id: tags:
``` python
def check_memoryCells(file_name, path):
    with h5file(file_name, 'r') as f:
        t_stamp = np.array(f[path + '/storageCells/timestamp'])
        st_cells = np.array(f[path + '/storageCells/value'])
        sc_start = np.array(f[path + '/storageCellStart/value'])

    valid_train = t_stamp > 0
    n_scs = st_cells[valid_train][0] + 1
    sc_s = sc_start[valid_train][0]

    return n_scs, sc_s
```
%% Cell type:code id: tags:
``` python
chunkSize = 100
filep_size = 1000
noiseCal = None
noise_map = None
offset_map = None
memoryCells = None

for i, r_n in enumerate(run_nums):
    gain = i
    print(f"Gain stage {gain}, run {r_n}")
    valid_data = []
    valid_cellids = []
    if r_n is not None:
        n_tr = 0
        n_empty_trains = 0
        n_empty_sc = 0

        ped_dir = "{}/r{:04d}/".format(in_folder, r_n)
        fp_name = path_template.format(r_n, karabo_da_control)
        fp_path = '{}/{}'.format(ped_dir, fp_name)
        files_pattern = "{}/*{}*.h5".format(ped_dir, path_inset)
        n_files = len(glob.glob(files_pattern))
        if n_files == 0:
            raise Exception(f"No files found matching {files_pattern!r}")
        myRange = range(0, n_files)
        control_path = h5path_cntrl.format(karabo_id_control, receiver_control_id)

        this_run_mcells, sc_start = check_memoryCells(fp_path.format(0).format(myRange[0]), control_path)

        if noise_map is None:
            if not manual_slow_data:
                with h5py.File(fp_path.format(0), 'r') as f:
                    run_path = h5path_run.format(karabo_id_control, receiver_control_id)
                    integration_time = float(f[f'{run_path}/exposureTime/value'][()]*1e6)
                    bias_voltage = int(np.squeeze(f[f'{run_path}/vHighVoltage/value'])[0])
                    if r_n == run_high:
                        try:
                            gain_s = f[f'/RUN/{karabo_id_control}/DET/CONTROL/settings/value'][0].decode()
                        except KeyError:
                            print(
                                "ERROR: gain_setting is not available for h5 ctrl path "
                                f"/RUN/{karabo_id_control}/DET/CONTROL/settings/value,\nfor file: {fp_path}. \n"
                                "WARNING: Setting gain_setting to 0, assuming that this is an old run.\n")
                            gain_s = "KeyError"
                        gain_setting = 1 if gain_s == "dynamichg0" else 0
                        print(f"Constants Gain setting is {gain_setting} ({gain_s})")
            print("Integration time is {} us".format(integration_time))
            print("Bias voltage is {} V".format(bias_voltage))
            if this_run_mcells == 1:
                memoryCells = 1
                print('Dark runs in single cell mode\n storage cell start: {:02d}'.format(sc_start))
            else:
                memoryCells = 16
                print('Dark runs in burst mode\n storage cell start: {:02d}'.format(sc_start))
            noise_map = np.zeros(sensorSize + [memoryCells, 3])
            offset_map = np.zeros(sensorSize + [memoryCells, 3])

        fp_name = path_template.format(r_n, path_inset)
        fp_path = '{}/{}'.format(ped_dir, fp_name)
        path = h5path

        print("Reading data from {}".format(fp_path))
        print("Run is: {}".format(r_n))
        print("HDF5 path: {}".format(h5path))
        imageRange = [0, filep_size * len(myRange)]

        reader = JFChunkReader(filename=fp_path, readFun=jfreader.readData, size=filep_size, chunkSize=chunkSize,
                               path=h5path, image_range=imageRange, pixels_x=sensorSize[0], pixels_y=sensorSize[1],
                               x_range=xRange, y_range=yRange, imagesPerChunk=chunkSize, filesRange=myRange,
                               memoryCells=this_run_mcells, blockSize=blockSize)

        for data in reader.readChunks():
            images = np.array(data[0], dtype=np.float64)
            gainmaps = np.array(data[1], dtype=np.uint16)
            trainId = np.array(data[2])
            fr_num = np.array(data[3])
            acelltable = np.array(data[4])
            n_tr += acelltable.shape[-1]
            this_tr = acelltable.shape[-1]

            idxs = np.nonzero(trainId)[0]
            images = images[..., idxs]
            gainmaps = gainmaps[..., idxs]
            fr_num = fr_num[..., idxs]
            acelltable = acelltable[..., idxs]
            if memoryCells == 1:
                acelltable -= sc_start

            n_empty_trains += this_tr - acelltable.shape[-1]
            n_empty_sc += len(acelltable[acelltable > 15])

            if i > 0 and memoryCells == 16:  # throwing away all the SC entries except the first for lower gains
                acelltable[1:] = 255

            # makes 4-dim vecs into 3-dim
            # makes 2-dim into 1-dim
            # leaves 1-dim and 3-dim vecs
            images, gainmaps, acelltable = rollout_data([images, gainmaps, acelltable])
            images, gainmaps, acelltable = sanitize_data_cellid([images, gainmaps], acelltable)  # removes entries with cellID 255
            valid_data.append(images)
            valid_cellids.append(acelltable)

        valid_data = np.concatenate(valid_data, axis=2)
        valid_cellids = np.concatenate(valid_cellids, axis=0)

        for cell in range(memoryCells):
            thiscell = valid_data[..., valid_cellids == cell]
            noise_map[..., cell, gain] = np.std(thiscell, axis=2)
            offset_map[..., cell, gain] = np.mean(thiscell, axis=2)

        print('G{:01d} dark calibration'.format(i))
        print('Missed {:d} out of {:d} trains'.format(n_empty_trains, n_tr))
        print('Lost {:d} images out of {:d}'.format(n_empty_sc, this_run_mcells * (n_tr - n_empty_trains)))
    else:
        print('missing G{:01d}'.format(i))
```
%% Cell type:markdown id: tags:
## Offset and Noise Maps ##
Below, offset and noise maps for the high ($g_0$) gain stage are shown, alongside the distribution of these values. One expects block-like structures mapping to the ASICs of the detector.
%% Cell type:code id: tags:
``` python
import matplotlib.pyplot as plt
from XFELDetAna.core.util import remove_nans

%matplotlib inline
#%matplotlib notebook

from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.histogram import histPlot

g_name = ['G0', 'G1', 'G2']
g_range = [(0, 8000), (8000, 16000), (8000, 16000)]
n_range = [(0., 50.), (0., 50.), (0., 50.)]
unit = '[ADCu]'
```
%% Cell type:code id: tags:
``` python
for g_idx in gains:
    for cell in range(0, memoryCells):
        f_o0 = heatmapPlot(np.swapaxes(offset_map[..., cell, g_idx], 0, 1),
                           y_label="Row",
                           x_label="Column",
                           lut_label=unit,
                           aspect=1.,
                           vmin=g_range[g_idx][0],
                           vmax=g_range[g_idx][1],
                           title=f'Pedestal {g_name[g_idx]} - Cell {cell:02d}')

        fo0, ax_o0 = plt.subplots()
        res_o0 = histPlot(ax_o0, offset_map[..., cell, g_idx],
                          bins=800,
                          range=g_range[g_idx],
                          facecolor='b',
                          histotype='stepfilled')
        ax_o0.tick_params(axis='both', which='major', labelsize=15)
        ax_o0.set_title(f'Module pedestal distribution - Cell {cell:02d}', fontsize=15)
        ax_o0.set_xlabel(f'Pedestal {g_name[g_idx]} {unit}', fontsize=15)
        ax_o0.set_yscale('log')

        f_n0 = heatmapPlot(np.swapaxes(noise_map[..., cell, g_idx], 0, 1),
                           y_label="Row",
                           x_label="Column",
                           lut_label=unit,
                           aspect=1.,
                           vmin=n_range[g_idx][0],
                           vmax=n_range[g_idx][1],
                           title=f"RMS noise {g_name[g_idx]} - Cell {cell:02d}")

        fn0, ax_n0 = plt.subplots()
        res_n0 = histPlot(ax_n0, noise_map[..., cell, g_idx],
                          bins=100,
                          range=n_range[g_idx],
                          facecolor='b',
                          histotype='stepfilled')
        ax_n0.tick_params(axis='both', which='major', labelsize=15)
        ax_n0.set_title(f'Module noise distribution - Cell {cell:02d}', fontsize=15)
        ax_n0.set_xlabel(f'RMS noise {g_name[g_idx]} ' + unit, fontsize=15)
        #ax_n0.set_yscale('log')

        plt.show()
```
%% Cell type:markdown id: tags:
## Bad Pixel Map ##
The bad pixel map is deduced by comparing the offset and noise value of each pixel ($v_i$) in each gain stage ($g$) against the median value over all pixels for that gain stage:
$$
v_i > \mathrm{median}(v_{k,g}) + n \sigma_{v_{k,g}}
$$
or
$$
v_i < \mathrm{median}(v_{k,g}) - n \sigma_{v_{k,g}}
$$
Values are encoded in a 32-bit mask; for bad pixels deduced from dark images, the following non-zero entries are relevant:
%% Cell type:code id: tags:
``` python
def print_bp_entry(bp):
    print("{:<30s} {:032b}".format(bp.name, bp.value))
print_bp_entry(BadPixels.OFFSET_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.NOISE_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.OFFSET_NOISE_EVAL_ERROR)
```
%% Cell type:code id: tags:
``` python
bad_pixels_map = np.zeros(noise_map.shape, np.uint32)
def eval_bpidx(d):
    mdn = np.nanmedian(d, axis=(0, 1))[None, None, :, :]
    std = np.nanstd(d, axis=(0, 1))[None, None, :, :]
    idx = (d > mdn + badpixel_threshold_sigma * std) | (d < mdn - badpixel_threshold_sigma * std)
    return idx
offset_abs_threshold = np.array(offset_abs_threshold)
bad_pixels_map[eval_bpidx(offset_map)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value
bad_pixels_map[~np.isfinite(offset_map)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
bad_pixels_map[eval_bpidx(noise_map)] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value
bad_pixels_map[~np.isfinite(noise_map)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
bad_pixels_map[(offset_map < offset_abs_threshold[0][None, None, None, :]) |
               (offset_map > offset_abs_threshold[1][None, None, None, :])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value
for g_idx in gains:
    for cell in range(memoryCells):
        bad_pixels = bad_pixels_map[:, :, cell, g_idx]
        fn_0 = heatmapPlot(np.swapaxes(bad_pixels, 0, 1),
                           y_label="Row",
                           x_label="Column",
                           lut_label=f"Bad pixels {g_name[g_idx]}",  # bitmask values, not ADC units
                           aspect=1.,
                           vmin=0, title=f'G{g_idx} Bad pixel map - Cell {cell:02d}')
```
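%% Cell type:markdown id: tags:
Since the entries are independent bit flags, a single pixel can carry several of them at once. As a quick sketch (using the `bad_pixels_map` built above), one might count how many entries each criterion flags:
%% Cell type:code id: tags:
``` python
total = bad_pixels_map.size
for flag in (BadPixels.OFFSET_OUT_OF_THRESHOLD,
             BadPixels.NOISE_OUT_OF_THRESHOLD,
             BadPixels.OFFSET_NOISE_EVAL_ERROR):
    # Bitwise AND selects entries where this particular flag is set.
    n_bad = np.count_nonzero(bad_pixels_map & flag.value)
    print(f"{flag.name:<30s} {100 * n_bad / total:.3f} % of (pixel, cell, gain) entries")
```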
%% Cell type:code id: tags:
``` python
# TODO: this cell in the notebook is not designed to run for more than one module
# Constants need to be stored per module, as for the big detectors
constants = {'Offset': np.moveaxis(offset_map, 0, 1),
             'Noise': np.moveaxis(noise_map, 0, 1),
             'BadPixelsDark': np.moveaxis(bad_pixels_map, 0, 1)}
md = None
for key, const_data in constants.items():
    const = getattr(Constants.jungfrau, key)()
    const.data = const_data
    # set the operating condition
    condition = Conditions.Dark.jungfrau(memory_cells=memoryCells, bias_voltage=bias_voltage,
                                         integration_time=integration_time,
                                         gain_setting=gain_setting)
    for parm in condition.parameters:
        if parm.name == "Integration Time":
            parm.lower_deviation = time_limits
            parm.upper_deviation = time_limits
    # This is needed in case the notebook is run by a method other than
    # myMdC, which already sends the CalCat info.
    # TODO: Set db_module to "" by default in the first cell
    if not db_module:
        db_module = get_pdu_from_db(karabo_id, karabo_da, const,
                                    condition, cal_db_interface,
                                    snapshot_at=creation_time)[0]
    if db_output:
        md = send_to_db(db_module, karabo_id, const, condition,
                        file_loc=file_loc, report_path=report,
                        cal_db_interface=cal_db_interface,
                        creation_time=creation_time,
                        timeout=cal_db_timeout)
    if local_output:
        md = save_const_to_h5(db_module, karabo_id, const, condition,
                              const.data, file_loc, report,
                              creation_time, out_folder)
        print(f"Calibration constant {key} is stored locally at {out_folder}.\n")
print("Constants parameter conditions are:\n")
print(f"• Bias voltage: {bias_voltage}\n• Memory cells: {memoryCells}\n"
      f"• Integration time: {integration_time}\n"
      f"• Gain setting: {gain_setting}\n"
      f"• Creation time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\n")
```
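%% Cell type:markdown id: tags:
If `local_output` is enabled, each constant is written to an HDF5 file in `out_folder` by `save_const_to_h5`. The exact file name and internal layout are determined by that helper, so the path below is only a placeholder; a hedged sketch of inspecting such a file with `h5py`:
%% Cell type:code id: tags:
``` python
import h5py

# Placeholder name: substitute a file actually produced by save_const_to_h5.
with h5py.File(f"{out_folder}/const_Offset_placeholder.h5", "r") as f:
    # Print every group/dataset path, plus shapes for datasets.
    f.visititems(lambda name, obj: print(name, getattr(obj, "shape", "")))
```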
%% Cell type:code id: tags:
``` python
```