Skip to content
Snippets Groups Projects

[Jungfrau][Dark] Set max trains to process dark constants from

Merged Karim Ahmed requested to merge fix/jungfrau_long_dark_run into master
badpixel_threshold_sigma = 5. # bad pixels defined by values outside n times this std from median
badpixel_threshold_sigma = 5. # bad pixels defined by values outside n times this std from median
offset_abs_threshold_low = [1000, 10000, 10000] # absolute bad pixel threshold in terms of offset, lower values
offset_abs_threshold_low = [1000, 10000, 10000] # absolute bad pixel threshold in terms of offset, lower values
offset_abs_threshold_high = [8000, 15000, 15000] # absolute bad pixel threshold in terms of offset, upper values
offset_abs_threshold_high = [8000, 15000, 15000] # absolute bad pixel threshold in terms of offset, upper values
max_trains = 0 # Maximum trains to process darks. Set to 0 to process all available train images.
max_trains = 1000 # Maximum trains to process darks. Set to 0 to process all available train images. 1000 trains is enough resolution to create the dark constants
min_trains = 1 # Minimum number of trains that should be available to process dark constants. Default 1.
min_trains = 100 # Minimum number of trains to process dark constants. Raise a warning if the run has fewer trains.
manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values
manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values
time_limits = 0.025 # to find calibration constants later on, the integration time is allowed to vary by 0.025 us
time_limits = 0.025 # to find calibration constants later on, the integration time is allowed to vary by 0.025 us
``` python
``` python
import glob
import os
import os
import warnings
import warnings
from pathlib import Path
from logging import warning
warnings.filterwarnings('ignore')
warnings.filterwarnings('ignore')
import matplotlib
import matplotlib
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.histogram import histPlot
from XFELDetAna.plotting.histogram import histPlot
from cal_tools import jungfraulib, step_timing
from cal_tools import jungfraulib, step_timing
from cal_tools.ana_tools import save_dict_to_hdf5
from cal_tools.enums import BadPixels, JungfrauGainMode
from cal_tools.enums import BadPixels, JungfrauGainMode
from cal_tools.tools import (
from cal_tools.tools import (
get_dir_creation_date,
get_dir_creation_date,
# A transparent workaround for old raw data with wrong/missing medium and low settings
# A transparent workaround for old raw data with wrong/missing medium and low settings
if med_low_settings == [None, None]:
if med_low_settings == [None, None]:
print("WARNING: run.settings is not stored in the data to read. "
warning("run.settings is not stored in the data to read. "
f"Hence assuming gain_mode = {gain_mode} for adaptive old data.")
f"Hence assuming gain_mode = {gain_mode} for adaptive old data.")
elif med_low_settings == ["dynamicgain", "forceswitchg1"]:
elif med_low_settings == ["dynamicgain", "forceswitchg1"]:
print(f"WARNING: run.settings for medium and low gain runs are wrong {med_low_settings}. "
warning(f"run.settings for medium and low gain runs are wrong {med_low_settings}. "
f"This is an expected bug for old raw data. Setting gain_mode to {gain_mode}.")
f"This is an expected bug for old raw data. Setting gain_mode to {gain_mode}.")
# Validate that low_med_settings is not a mix of adaptive and fixed settings.
# Validate that low_med_settings is not a mix of adaptive and fixed settings.
elif not (sorted(med_low_settings) in [fixed_settings, dynamic_settings, old_fixed_settings]): # noqa
elif not (sorted(med_low_settings) in [fixed_settings, dynamic_settings, old_fixed_settings]): # noqa
raise ValueError(
raise ValueError(
``` python
``` python
context = psh.context.ThreadContext(num_workers=multiprocessing.cpu_count())
context = psh.context.ThreadContext(num_workers=memory_cells)
%% Cell type:code id: tags:
%% Cell type:code id: tags:
print(f"\n- Instrument data path for {mod} is {instrument_src}.")
print(f"\n- Instrument data path for {mod} is {instrument_src}.")
offset_map[mod] = context.alloc(shape=(sensor_size+(memory_cells, 3)), fill=0)
offset_map[mod] = context.alloc(
 
shape=(sensor_size+(memory_cells, 3)), fill=0, dtype=np.float32)
noise_map[mod] = context.alloc(like=offset_map[mod], fill=0)
noise_map[mod] = context.alloc(like=offset_map[mod], fill=0)
bad_pixels_map[mod] = context.alloc(like=offset_map[mod], dtype=np.uint32, fill=0)
bad_pixels_map[mod] = context.alloc(like=offset_map[mod], dtype=np.uint32, fill=0)
def process_cell(worker_id, array_index, cell_number):
def process_cell(worker_id, array_index, cell_number):
cell_slice_idx = acelltable == cell_number
cell_slice_idx = acelltable == cell_number
thiscell = images[..., cell_slice_idx]
thiscell = images[..., cell_slice_idx] # [1024, 512, n_trains]
# Identify cells/trains with images of 0 pixels.
# Identify cells/trains with images of 0 pixels.
# TODO: An investigation is ongoing by DET to identify reason for these empty images.
# TODO: An investigation is ongoing by DET to identify reason for these empty images.
nonzero_adc = np.any(thiscell != 0 , axis=(0, 1))
nonzero_adc = np.any(thiscell != 0 , axis=(0, 1)) # [n_trains]
# Exclude empty images with 0 pixels, before calculating offset and noise
# Exclude empty images with 0 pixels, before calculating offset and noise
thiscell = thiscell[..., nonzero_adc]
thiscell = thiscell[..., nonzero_adc]
offset_map[mod][..., cell_number, gain] = np.mean(thiscell, axis=2)
offset_map[mod][..., cell_number, gain] = np.mean( # [1024, 512]
noise_map[mod][..., cell_number, gain] = np.std(thiscell, axis=2)
thiscell, axis=2, dtype=np.float32)
noise_map[mod][..., cell_number, gain] = np.std( # [1024, 512]
 
thiscell, axis=2, dtype=np.float32)
 
del thiscell
# Check if there are wrong bad gain values.
# Check if there are wrong bad gain values.
# 1. Exclude empty images.
# 1. Exclude empty images.
# 2. Indicate pixels with wrong gain value for any train for each cell.
# 2. Indicate pixels with wrong gain value for any train for each cell.
# TODO: mean is used to use thresholds for accepting gain values, even if not 0 mean value.
# TODO: mean is used to use thresholds for accepting gain values, even if not 0 mean value.
gain_avg = np.mean(
gain_avg = np.mean( # [1024, 512]
gain_vals[..., cell_slice_idx][..., nonzero_adc], axis=2)
gain_vals[..., cell_slice_idx][..., nonzero_adc],
 
axis=2, dtype=np.float32
 
)
 
# [1024, 512]
bad_pixels_map[mod][..., cell_number, gain][gain_avg != raw_g] |= BadPixels.WRONG_GAIN_VALUE.value
bad_pixels_map[mod][..., cell_number, gain][gain_avg != raw_g] |= BadPixels.WRONG_GAIN_VALUE.value
 
print(f"Gain stage {gain}, run {run_n}")
print(f"Gain stage {gain}, run {run_n}")
# load shape of data for memory cells, and detector size (imgs, cells, x, y)
# load shape of data for memory cells, and detector size (imgs, cells, x, y)
n_imgs = run_dc[instrument_src, "data.adc"].shape[0]
n_trains = run_dc[instrument_src, "data.adc"].shape[0]
# load number of data available, including trains with empty data.
# load number of data available, including trains with empty data.
n_trains = len(run_dc.train_ids)
all_trains = len(run_dc.train_ids)
instr_dc = run_dc.select(instrument_src, require_all=True)
instr_dc = run_dc.select(instrument_src, require_all=True)
empty_trains = n_trains - n_imgs
empty_trains = all_trains - n_trains
if empty_trains != 0:
if empty_trains != 0:
print(f"\tWARNING: {mod} has {empty_trains} trains with empty data out of {n_trains} trains") # noqa
print(f"{mod} has {empty_trains} empty trains out of {all_trains} trains")
if max_trains > 0:
if max_trains > 0:
n_imgs = min(n_imgs, max_trains)
n_trains = min(n_trains, max_trains)
print(f"Processing {n_imgs} images.")
print(f"Processing {n_trains} images.")
# Select only requested number of images to process darks.
instr_dc = instr_dc.select_trains(np.s_[:n_imgs])
if n_imgs < min_trains:
if n_trains == 0:
raise ValueError(
raise ValueError(f"{run_n} has no trains to process.")
f"Less than {min_trains} trains are available in RAW data."
" Not enough data to process darks.")
 
if n_trains < min_trains:
 
warning(f"Less than {min_trains} trains are available in RAW data.")
 
 
# Select only requested number of images to process darks.
 
instr_dc = instr_dc.select_trains(np.s_[:n_trains])
images = np.transpose(
images = np.transpose(
instr_dc[instrument_src, "data.adc"].ndarray(), (3, 2, 1, 0))
instr_dc[instrument_src, "data.adc"].ndarray(), (3, 2, 1, 0))
acelltable = np.transpose(instr_dc[instrument_src, "data.memoryCell"].ndarray())
acelltable = np.transpose(instr_dc[instrument_src, "data.memoryCell"].ndarray())
# Calculate offset and noise maps
# Calculate offset and noise maps
context.map(process_cell, range(memory_cells))
context.map(process_cell, range(memory_cells))
del images
 
del acelltable
 
del gain_vals
step_timer.done_step(f'Creating Offset and noise constants for a module.')
step_timer.done_step(f'Creating Offset and noise constants for a module.')
Loading