%% Cell type:markdown id: tags:
# AGIPD Retrieving Constants Pre-correction #
Author: European XFEL Detector Group, Version: 1.0
Retrieving Required Constants for Offline Calibration of the AGIPD Detector
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/SPB/202030/p900119/raw" # the folder to read data from, required
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/AGIPD_" # the folder to output to, required
sequences = [-1] # sequences to correct, set to -1 for all, range allowed
modules = [-1] # modules to correct, set to -1 for all, range allowed
run = 80 # runs to process, required
karabo_id = "SPB_DET_AGIPD1M-1" # karabo karabo_id
karabo_da = ['-1'] # a list of data aggregators names, Default [-1] for selecting all data aggregators
path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data
ctrl_source_template = '{}/MDL/FPGA_COMP_TEST' # path to control information
instrument_source_template = '{}/DET/{}:xtdf' # path in the HDF5 file to images
receiver_template = "{}CH0" # inset for receiver devices
karabo_id_control = "SPB_IRU_AGIPD1M1" # karabo-id for control device
karabo_da_control = 'AGIPD1MCTRL00' # karabo DA for control information
use_dir_creation_date = True # use the creation date of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use
creation_date_offset = "00:00:00" # add an offset to creation date, e.g. to get different constants
slopes_ff_from_files = "" # Path to locally stored SlopesFF and BadPixelsFF constants
calfile = "" # path to calibration file. Leave empty if all data should come from DB
nodb = False # if set only file-based constants will be used
mem_cells = 0 # number of memory cells used, set to 0 to automatically infer
bias_voltage = 0 # bias voltage, set to 0 to use the value stored in slow data.
acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine
gain_setting = -1 # the gain setting, use -1 to use the value stored in slow data.
gain_mode = -1 # gain mode (0: adaptive, 1-3 fixed high/med/low, -1: read from CONTROL data)
photon_energy = 9.2 # photon energy in keV
max_cells_db_dark = 0 # set to a value different than 0 to use this value for dark data DB queries
max_cells_db = 0 # set to a value different than 0 to use this value for DB queries
integration_time = -1 # integration time, negative values for auto-detection.
# Correction Booleans
only_offset = False # apply only the offset correction; offset is always applied, but if True no other corrections are applied.
rel_gain = False # do relative gain correction based on PC data
xray_gain = True # do relative gain correction based on xray data
blc_noise = False # if set, baseline correction via noise peak location is attempted
blc_stripes = False # if set, baseline corrected via stripes
blc_hmatch = False # if set, base line correction via histogram matching is attempted
match_asics = False # if set, inner ASIC borders are matched to the same signal level
adjust_mg_baseline = False # adjust medium gain baseline to match highest high gain value
```
%% Cell type:code id: tags:
``` python
# Fill dictionaries comprising bools and arguments for correction and data analysis
# Here the hierarchy and dependencies for correction booleans are defined
corr_bools = {}

# Offset is at the bottom of the AGIPD correction pyramid.
corr_bools["only_offset"] = only_offset

# Don't apply any other corrections if only_offset is requested
if not only_offset:
    corr_bools["adjust_mg_baseline"] = adjust_mg_baseline
    corr_bools["rel_gain"] = rel_gain
    corr_bools["xray_corr"] = xray_gain
    corr_bools["blc_noise"] = blc_noise
    corr_bools["blc_hmatch"] = blc_hmatch
```
%% Cell type:code id: tags:
``` python
import multiprocessing
from datetime import timedelta
from pathlib import Path
from typing import Tuple

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from dateutil import parser
from extra_data import RunDirectory

matplotlib.use("agg")

from cal_tools import agipdlib, tools
from cal_tools.enums import AgipdGainMode
from iCalibrationDB import Conditions, Constants, Detectors
```
%% Cell type:code id: tags:
``` python
# slopes_ff_from_files left as str for now
in_folder = Path(in_folder)
out_folder = Path(out_folder)
metadata = tools.CalibrationMetadata(out_folder)
```
%% Cell type:code id: tags:
``` python
creation_time = None
if use_dir_creation_date:
    creation_time = tools.get_dir_creation_date(str(in_folder), run)
    offset = parser.parse(creation_date_offset)
    delta = timedelta(hours=offset.hour, minutes=offset.minute, seconds=offset.second)
    creation_time += delta
    print(f"Using {creation_time} as creation time")

if sequences[0] == -1:
    sequences = None

print(f"Outputting to {out_folder}")
out_folder.mkdir(parents=True, exist_ok=True)

melt_snow = False if corr_bools["only_offset"] else agipdlib.SnowResolution.NONE
```
%% Cell type:code id: tags:
``` python
ctrl_src = ctrl_source_template.format(karabo_id_control)

print(f"Detector in use is {karabo_id}")

# Extract the instrument string from the karabo_id
instrument = karabo_id.split("_")[0]

# Evaluate the detector instance for mapping
if instrument == "SPB":
    dinstance = "AGIPD1M1"
    nmods = 16
elif instrument == "MID":
    dinstance = "AGIPD1M2"
    nmods = 16
elif instrument == "HED":
    dinstance = "AGIPD500K"
    nmods = 8

print(f"Instrument {instrument}")
print(f"Detector instance {dinstance}")

if karabo_da[0] == '-1':
    if modules[0] == -1:
        modules = list(range(nmods))
    karabo_da = ["AGIPD{:02d}".format(i) for i in modules]
else:
    modules = [int(x[-2:]) for x in karabo_da]
```
%% Cell type:code id: tags:
``` python
run_dc = RunDirectory(in_folder / f"r{run:04d}")
```
%% Cell type:code id: tags:
``` python
# Read AGIPD conditions from the 1st sequence of the 1st module and slow data.
instrument_src = instrument_source_template.format(karabo_id, receiver_template)
instrument_src_mod = instrument_src.format(0)

agipd_cond = agipdlib.AgipdCtrl(
    run_dc=run_dc,
    image_src=None,  # Not needed, as we won't read mem_cells or acq_rate here.
    ctrl_src=ctrl_src,
)

if gain_setting == -1:
    gain_setting = agipd_cond.get_gain_setting(creation_time)
if bias_voltage == 0.:
    bias_voltage = agipd_cond.get_bias_voltage(karabo_id_control)
if integration_time == -1:
    integration_time = agipd_cond.get_integration_time()
if gain_mode == -1:
    gain_mode = agipd_cond.get_gain_mode()
else:
    gain_mode = AgipdGainMode(gain_mode)

print(f"Gain setting: {gain_setting}")
print(f"Gain mode: {gain_mode.name}")
```
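%% Cell type:markdown id: tags:
For orientation: the integer `gain_mode` parameter maps onto the `AgipdGainMode` enum used above (0: adaptive, 1-3: fixed high/medium/low, as stated in the parameter comment). A minimal sanity check, assuming the member names defined in `cal_tools.enums`:
%% Cell type:code id: tags:
``` python
from cal_tools.enums import AgipdGainMode

# Illustrative check only: 0 is adaptive gain, 1-3 are the fixed gain stages.
assert AgipdGainMode(0) is AgipdGainMode.ADAPTIVE_GAIN
assert AgipdGainMode(1) is AgipdGainMode.FIXED_HIGH_GAIN
assert AgipdGainMode(3) is AgipdGainMode.FIXED_LOW_GAIN
```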
%% Cell type:markdown id: tags:
## Retrieve Constants ##
%% Cell type:code id: tags:
``` python
def retrieve_constants(
    karabo_da: str, idx: int
) -> Tuple[dict, str, float, int]:
    """
    Retrieve constants for a module.

    :return:
        mdata_dict: dictionary with the metadata for the retrieved constants.
        karabo_da: karabo data aggregator.
        acq_rate: acquisition rate parameter.
        mem_cells: number of memory cells.
    """
    if mem_cells == 0:
        # either look around in sequence files
        agipd_cond.image_src = instrument_src.format(idx)
        local_mem_cells = agipd_cond.get_num_cells()
    else:
        # or use the overriding notebook parameter
        local_mem_cells = mem_cells

    # maybe we never found this in a sequence file...
    if local_mem_cells is None:
        raise ValueError(
            "No raw images found for "
            f"{tools.module_index_to_qm(idx)}({karabo_da}) for all sequences")

    if acq_rate == 0.:
        local_acq_rate = agipd_cond.get_acq_rate()
    else:
        local_acq_rate = acq_rate

    # avoid retrieving constants, if requested.
    if nodb_with_dark:
        return

    const_dict = agipdlib.assemble_constant_dict(
        corr_bools,
        pc_bools,
        local_mem_cells,
        bias_voltage,
        gain_setting,
        local_acq_rate,
        photon_energy,
        gain_mode=gain_mode,
        beam_energy=None,
        only_dark=only_dark,
        integration_time=integration_time
    )

    # Retrieve multiple constants through an input dictionary
    # to return a dict of useful metadata.
    mdata_dict = dict()
    mdata_dict["constants"] = dict()
    mdata_dict["physical-detector-unit"] = None  # initialization

    for const_name, (const_init_fun, const_shape, (cond_type, cond_param)) in const_dict.items():  # noqa
        if gain_mode and const_name in ("ThresholdsDark",):
            continue

        # saving metadata in a dict
        const_mdata = dict()
        mdata_dict["constants"][const_name] = const_mdata

        if slopes_ff_from_files and const_name in ["SlopesFF", "BadPixelsFF"]:
            const_mdata["file-path"] = (
                f"{slopes_ff_from_files}/slopesff_bpmask_module_{tools.module_index_to_qm(idx)}.h5")  # noqa
            const_mdata["creation-time"] = "00:00:00"
            continue

        if gain_mode and const_name in (
            "BadPixelsPC", "SlopesPC", "BadPixelsFF", "SlopesFF"
        ):
            param_copy = cond_param.copy()
            del param_copy["gain_mode"]
            condition = getattr(Conditions, cond_type).AGIPD(**param_copy)
        else:
            condition = getattr(Conditions, cond_type).AGIPD(**cond_param)

        _, mdata = tools.get_from_db(
            karabo_id,
            karabo_da,
            getattr(Constants.AGIPD, const_name)(),
            condition,
            getattr(np, const_init_fun)(const_shape),
            cal_db_interface,
            creation_time,
            meta_only=True,
            verbosity=0,
        )
        mdata_const = mdata.calibration_constant_version
        # check if the constant was successfully retrieved.
        if mdata.comm_db_success:
            const_mdata["file-path"] = (
                f"{mdata_const.hdf5path}" f"{mdata_const.filename}"
            )
            const_mdata["creation-time"] = f"{mdata_const.begin_at}"
            mdata_dict["physical-detector-unit"] = mdata_const.device_name
        else:
            const_mdata["file-path"] = const_dict[const_name][:2]
            const_mdata["creation-time"] = None

    return mdata_dict, karabo_da, local_acq_rate, local_mem_cells
```
%% Cell type:code id: tags:
``` python
# Constant paths & timestamps are saved under retrieved-constants in calibration_metadata.yml
retrieved_constants = metadata.setdefault("retrieved-constants", {})
```
%% Cell type:code id: tags:
``` python
pc_bools = [corr_bools.get("rel_gain"),
            corr_bools.get("adjust_mg_baseline"),
            corr_bools.get('blc_noise'),
            corr_bools.get('blc_hmatch'),
            corr_bools.get('blc_stripes'),
            melt_snow]

inp = []
only_dark = False
nodb_with_dark = False
if not nodb:
    only_dark = (calfile != "")
if calfile != "" and not corr_bools["only_offset"]:
    nodb_with_dark = nodb

da_to_qm = dict()
for module_index, k_da in zip(modules, karabo_da):
    da_to_qm[k_da] = tools.module_index_to_qm(module_index)
    if k_da in retrieved_constants:
        print(
            f"Constant for {k_da} already in calibration_metadata.yml, won't query again.")
        continue

    inp.append((k_da, module_index))
```
%% Cell type:code id: tags:
``` python
with multiprocessing.Pool(processes=nmods) as pool:
    results = pool.starmap(retrieve_constants, inp)
```
%% Cell type:code id: tags:
``` python
acq_rate_mods = []
mem_cells_mods = []
for md_dict, karabo_da, acq_rate, mem_cells in results:
    retrieved_constants[karabo_da] = md_dict
    mem_cells_mods.append(mem_cells)
    acq_rate_mods.append(acq_rate)

# Validate that mem_cells and acq_rate are the same for all modules.
# TODO: Should a warning be enough?
if len(set(mem_cells_mods)) != 1 or len(set(acq_rate_mods)) != 1:
    print(
        "WARNING: Number of memory cells or "
        "acquisition rate are not identical for all modules.\n"
        f"mem_cells: {mem_cells_mods}.\nacq_rate: {acq_rate_mods}.")

# check if it is requested not to retrieve any constants from the database
if nodb_with_dark:
    print("No constants were retrieved as calibrated files will be used.")
else:
    print("\nRetrieved constants for modules:",
          ', '.join([tools.module_index_to_qm(x) for x in modules]))
    print("Operating conditions are:")
    print(f"• Bias voltage: {bias_voltage}")
    print(f"• Memory cells: {mem_cells}")
    print(f"• Acquisition rate: {acq_rate}")
    print(f"• Gain mode: {gain_mode.name}")
    print(f"• Gain setting: {gain_setting}")
    print(f"• Integration time: {integration_time}")
    print(f"• Photon Energy: {photon_energy}")
    print("Constant metadata is saved under \"retrieved-constants\" in calibration_metadata.yml\n")
```
%% Cell type:code id: tags:
``` python
print("Using constants with creation times:")
timestamps = {}
for k_da, module_name in da_to_qm.items():
module_timestamps = timestamps[module_name] = {}
module_constants = retrieved_constants[k_da]
print(f"{module_name}:")
for cname, mdata in module_constants["constants"].items():
if hasattr(mdata["creation-time"], 'strftime'):
mdata["creation-time"] = mdata["creation-time"].strftime('%y-%m-%d %H:%M')
print(f'{cname:.<12s}', mdata["creation-time"])
for cname in ['Offset', 'SlopesPC', 'SlopesFF']:
if cname in module_constants["constants"]:
module_timestamps[cname] = module_constants["constants"][cname]["creation-time"]
else:
module_timestamps[cname] = "NA"
time_summary = retrieved_constants.setdefault("time-summary", {})
time_summary["SAll"] = timestamps
metadata.save()
```
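%% Cell type:markdown id: tags:
For orientation, the metadata saved above has roughly the following shape (an illustrative sketch only; every path, timestamp and PDU name below is invented):
%% Cell type:code id: tags:
``` python
# Hypothetical example of the "retrieved-constants" structure in calibration_metadata.yml.
example = {
    "retrieved-constants": {
        "AGIPD00": {
            "constants": {
                "Offset": {
                    "file-path": "/gpfs/exfel/d/cal/.../cal.h5",  # invented path
                    "creation-time": "21-06-01 12:00",            # invented timestamp
                },
            },
            "physical-detector-unit": "AGIPD_SIV1_AGIPDV11_M001",  # invented PDU name
        },
        "time-summary": {
            "SAll": {"Q1M1": {"Offset": "21-06-01 12:00"}},
        },
    }
}
```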
......
%% Cell type:markdown id: tags:
# Gain Characterization Summary #
%% Cell type:code id: tags:
``` python
in_folder = "" # in this notebook, in_folder is not used as the data source is in the destination folder
out_folder = "" # the folder to output to, required
hist_file_template = "hists_m{:02d}_sum.h5"
proc_folder = "" # Path to corrected image data used to create histograms and validation plots
raw_folder = "/gpfs/exfel/exp/MID/202030/p900137/raw" # folder of raw data; used to record information about the source data of the generated constants, required
run = 449 # runs of image data used to create histograms
karabo_id = "MID_DET_AGIPD1M-1" # karabo karabo_id
receiver_id = "{}CH0" # inset for receiver devices
path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data
h5path = 'INSTRUMENT/{}/DET/{}:xtdf/' # path in the HDF5 file to images
h5path_idx = 'INDEX/{}/DET/{}:xtdf/' # path in the HDF5 file to images
ctrl_source_template = '{}/MDL/FPGA_COMP' # template for the control source (filled with karabo_id_control)
karabo_id_control = "MID_EXP_AGIPD1M1" # karabo-id for control device
use_dir_creation_date = True # use the creation date of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use
cal_db_timeout = 30000 # timeout in milliseconds
local_output = True # output constants locally
db_output = False # output constants to database
# Fit parameters
peak_range = [-30,30,35,65,80,130,145,200] # where to look for the peaks, [a0, b0, a1, b1, ...] exactly 8 elements
peak_width_range = [0, 30, 0, 35, 0, 40, 0, 45] # fit limits on the peak widths, [a0, b0, a1, b1, ...] exactly 8 elements
# Bad-pixel thresholds
d0_lim = [10, 70] # hard limits for d0 value (distance between noise and first peak)
peak_width_lim = [0.97, 1.43, 1.03, 1.57] # hard limits on the peak widths, [a0, b0, a1, b1, ...] in units of the noise peak. 4 parameters.
chi2_lim = [0,3.0] # Hard limit on chi2/nDOF value
gain_lim = [0.80, 1.2] # Threshold on gain in relative number. Contribute to BadPixel bit "Gain_deviation"
cell_range = [1,5] # range of cell to be considered, [0,0] for all
pixel_range = [0,0,512,128] # range of pixels x1,y1,x2,y2 to consider [0,0,512,128] for all
max_bins = 250 # Maximum number of bins to consider
batch_size = [1,8,8] # batch size: [cell,x,y]
n_peaks_fit = 4 # Number of gaussian peaks to fit including noise peak
fix_peaks = True # Fix distance between photon peaks
# Detector conditions
max_cells = 0 # number of memory cells used, set to 0 to automatically infer
bias_voltage = 0. # bias voltage, set to 0. to use the value stored in slow data
acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine
gain_setting = -1 # the gain setting, use -1 to use the value stored in slow data
photon_energy = 8.05 # photon energy in keV
integration_time = -1 # integration time, negative values for auto-detection.
```
%% Cell type:code id: tags:
``` python
import glob
import os
import re
import traceback
import warnings
from multiprocessing import Pool
import h5py
import matplotlib.pyplot as plt
import numpy as np
import tabulate
from cal_tools.agipdlib import AgipdCtrl
from cal_tools.agipdutils_ff import (
BadPixelsFF,
any_in,
fit_n_peaks,
gaussian_sum,
get_starting_parameters,
)
from cal_tools.ana_tools import get_range, save_dict_to_hdf5
from cal_tools.enums import BadPixels
from cal_tools.tools import (
get_dir_creation_date,
get_pdu_from_db,
get_report,
module_index_to_qm,
send_to_db
)
from dateutil import parser
from extra_data import H5File, RunDirectory, stack_detector_data
from extra_geom import AGIPD_1MGeometry, AGIPD_500K2GGeometry
from iCalibrationDB import Conditions, Constants, Detectors
from iminuit import Minuit
from IPython.display import HTML, Latex, Markdown, display
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.simpleplot import simplePlot
%matplotlib inline
warnings.filterwarnings('ignore')
```
%% Cell type:code id: tags:
``` python
peak_range = np.reshape(peak_range,(4,2))
```
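%% Cell type:markdown id: tags:
The reshape turns the eight flat numbers into four `(low, high)` search windows, one per fitted peak. Shown standalone with the default parameter:
%% Cell type:code id: tags:
``` python
import numpy as np

flat = [-30, 30, 35, 65, 80, 130, 145, 200]
print(np.reshape(flat, (4, 2)))
# One (low, high) window per peak: the noise peak is searched in [-30, 30] ADU,
# the third photon peak in [145, 200] ADU.
```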
%% Cell type:code id: tags:
``` python
# Get operation conditions
raw_dc = RunDirectory(f'{raw_folder}/r{run:04d}/')
ctrl_source = ctrl_source_template.format(karabo_id_control)

# Read operating conditions from the AGIPD00 files
instrument_src_mod = [
    s for s in list(raw_dc.all_sources) if "0CH" in s][0]
ctrl_src = [
    s for s in list(raw_dc.all_sources) if ctrl_source in s][0]

# Evaluate creation time
creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(raw_folder, run)

agipd_cond = AgipdCtrl(
    run_dc=raw_dc,
    image_src=instrument_src_mod,
    ctrl_src=ctrl_src,
    raise_error=False,  # to be able to process very old data missing some control values
)

# Evaluate number of memory cells
mem_cells = agipd_cond.get_num_cells()
if mem_cells is None:
    raise ValueError(f"No raw images found in {raw_dc[instrument_src_mod].files}")

# Evaluate acquisition rate
if acq_rate == 0.:
    acq_rate = agipd_cond.get_acq_rate()

# Evaluate gain setting
if gain_setting == -1:
    gain_setting = agipd_cond.get_gain_setting(creation_time)

if bias_voltage == 0.:
    bias_voltage = agipd_cond.get_bias_voltage(karabo_id_control)

# Evaluate integration time
if integration_time == -1:
    integration_time = agipd_cond.get_integration_time()

# Evaluate detector instance for mapping
instrument = karabo_id.split("_")[0]
if instrument == "HED":
    nmods = 8
else:
    nmods = 16

print(f"Using {creation_time} as creation time")
print(f"Operating conditions are:\n• Bias voltage: {bias_voltage}\n• Memory cells: {mem_cells}\n"
      f"• Acquisition rate: {acq_rate}\n• Gain setting: {gain_setting}\n• Integration time: {integration_time}\n"
      f"• Photon Energy: {photon_energy}\n")
```
%% Cell type:code id: tags:
``` python
# Load constants for all modules
keys = ['g0mean', 'g1mean', 'gain', 'chi2_ndof', 'mask']
all_keys = set(keys)
for i in range(n_peaks_fit):
    all_keys.add(f'g{i}mean')
    all_keys.add(f'g{i}sigma')

fit_data = {}
labels = {'g0mean': 'Noise peak position [ADU]',
          'g1mean': 'First photon peak [ADU]',
          'gain': f"Gain [ADU/keV], $\gamma$={photon_energy} [keV]",
          'chi2_ndof': '$\chi^2$/nDOF',
          'mask': 'Fraction of bad pixels over cells'}

modules = []
karabo_da = []
for mod in range(nmods):
    qm = module_index_to_qm(mod)
    fit_data[mod] = {}
    try:
        hf = h5py.File(f'{out_folder}/fits_m{mod:02d}.h5', 'r')
        shape = hf['data/g0mean'].shape
        for key in keys:
            fit_data[mod][key] = hf[f'data/{key}'][()]
        print(f"{in_folder}/{hist_file_template.format(mod)}")
        modules.append(mod)
        karabo_da.append(f"AGIPD{mod:02d}")
    except Exception as e:
        err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
        print(f"No fit data available for module {qm}")
```
%% Cell type:code id: tags:
``` python
# Calculate SlopesFF and BadPixels to be sent to the DB
bpmask = {}
slopesFF = {}

for mod in modules:
    bpmask[mod] = np.zeros(fit_data[mod]['mask'].shape).astype(np.int32)
    bpmask[mod][any_in(fit_data[mod]['mask'], BadPixelsFF.NO_ENTRY.value)] = BadPixels.FF_NO_ENTRIES.value
    bpmask[mod][any_in(fit_data[mod]['mask'],
                       BadPixelsFF.GAIN_DEVIATION.value)] |= BadPixels.FF_GAIN_DEVIATION.value
    bpmask[mod][any_in(fit_data[mod]['mask'],
                       BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
                       BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |
                       BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value)] |= BadPixels.FF_GAIN_EVAL_ERROR.value

    # Set the value for bad pixels to the average across pixels for a given module
    slopesFF[mod] = np.copy(fit_data[mod]['gain'])
    slopesFF[mod][fit_data[mod]['mask'] > 0] = np.nan
    gain_mean = np.nanmean(slopesFF[mod], axis=(1, 2))

    for i in range(slopesFF[mod].shape[0]):
        slopesFF[mod][i][fit_data[mod]['mask'][i] > 0] = gain_mean[i]
```
%% Cell type:code id: tags:
``` python
# Read report path and create file location tuple to add with the injection
proposal = list(filter(None, raw_folder.strip('/').split('/')))[-2]
file_loc = f'Proposal: {proposal}, Run: {run}'
report = get_report(out_folder)
```
%% Cell type:code id: tags:
``` python
# set the operating condition
condition = Conditions.Illuminated.AGIPD(mem_cells, bias_voltage, 9.2,
                                         pixels_x=512, pixels_y=128, beam_energy=None,
                                         acquisition_rate=acq_rate, gain_setting=gain_setting,
                                         integration_time=integration_time)

# Modify acceptable deviations for the integration time condition if and only if
# the integration time is not using the standard value (12).
if integration_time != 12:
    for p in condition.parameters:
        if p.name == 'Integration Time':
            p.lower_deviation = 5
            p.upper_deviation = 5

# Retrieve a list of all modules corresponding to processed karabo_das
db_modules = get_pdu_from_db(karabo_id, karabo_da, Constants.AGIPD.SlopesFF(),
                             condition, cal_db_interface,
                             snapshot_at=creation_time)
```
%% Cell type:code id: tags:
``` python
# Send constants to DB
def send_const(mod, pdu):
    try:
        # gain
        constant = Constants.AGIPD.SlopesFF()
        constant.data = np.moveaxis(np.moveaxis(slopesFF[mod], 0, 2), 0, 1)
        send_to_db(
            pdu, karabo_id, constant, condition, file_loc,
            report, cal_db_interface, creation_time,
            timeout=cal_db_timeout,
        )

        # bad pixels
        constant_bp = Constants.AGIPD.BadPixelsFF()
        constant_bp.data = np.moveaxis(np.moveaxis(bpmask[mod], 0, 2), 0, 1)
        send_to_db(
            pdu, karabo_id, constant_bp, condition, file_loc,
            report, cal_db_interface, creation_time,
            timeout=cal_db_timeout,
        )
    except Exception as e:
        err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
        when = None

# Check that we have the shape we expect before sending
if db_output:
    if slopesFF[modules[0]].shape == (mem_cells, 512, 128):
        with Pool(processes=len(modules)) as pool:
            const_out = pool.starmap(send_const, zip(modules, db_modules))
    else:
        print("Constants are not sent to the DB because of a shape mismatch")
        print(f"Expected {(mem_cells, 512, 128)}, observed {slopesFF[modules[0]].shape}")

condition_dict = {}
for entry in condition.to_dict()['parameters']:
    key = entry.pop('parameter_name')
    del entry['description']
    del entry['flg_available']
    condition_dict[key] = entry

# Create the same file structure as database constants files, in which
# each constant type has its corresponding condition and data.
if local_output:
    for mod, pdu in zip(modules, db_modules):
        qm = module_index_to_qm(mod)
        file = f"{out_folder}/slopesff_bpmask_module_{qm}.h5"
        dic = {
            pdu: {
                'SlopesFF': {
                    0: {
                        'condition': condition_dict,
                        'data': np.moveaxis(np.moveaxis(slopesFF[mod], 0, 2), 0, 1)}
                },
                'BadPixelsFF': {
                    0: {
                        'condition': condition_dict,
                        'data': np.moveaxis(np.moveaxis(bpmask[mod], 0, 2), 0, 1)}
                },
            }
        }
        save_dict_to_hdf5(dic, file)
```
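%% Cell type:markdown id: tags:
To inspect one of the locally written files, the nested dictionary above can be walked back with plain h5py. A minimal sketch, assuming `save_dict_to_hdf5` maps each nested dict key to an HDF5 group (module Q1M1 used as an example):
%% Cell type:code id: tags:
``` python
import h5py

with h5py.File(f"{out_folder}/slopesff_bpmask_module_Q1M1.h5", "r") as f:
    pdu = list(f.keys())[0]  # the physical detector unit name forms the top group
    slopes = f[f"{pdu}/SlopesFF/0/data"][()]
    badpix = f[f"{pdu}/BadPixelsFF/0/data"][()]
    print(pdu, slopes.shape, badpix.shape)
```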
%% Cell type:code id: tags:
``` python
# Define the AGIPD geometry
# TODO: find a better way to do this?
if instrument == "HED":
    geom = AGIPD_500K2GGeometry.from_origin()
else:
    geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
        (-525, 625),
        (-550, -10),
        (520, -160),
        (542.5, 475),
    ])
```
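%% Cell type:markdown id: tags:
Both geometries assemble stacked module data of shape `(modules, 512, 128)`. A quick sanity check (a sketch; `expected_data_shape` is the extra_geom attribute for this):
%% Cell type:code id: tags:
``` python
# (16, 512, 128) for AGIPD-1M, (8, 512, 128) for AGIPD-500K2G.
print(geom.expected_data_shape)
dummy = np.zeros(geom.expected_data_shape)
ax = geom.plot_data_fast(dummy)  # renders an (empty) assembled detector image
```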
%% Cell type:code id: tags:
``` python
# Create the arrays that will be used for figures.
# A dictionary contains all the data for each of the processing stages (gains, mean, slopesFF...).
# Each array corresponds to the data for all processed modules.
# These are updated with their fit/slopes data in the following loops.
if cell_range == [0, 0]:
    cell_range[1] = shape[0]

const_data = {}
for key in keys:
    const_data[key] = np.full((nmods, shape[0], 512, 128), np.nan)
    for i in range(nmods):
        if key in fit_data[i]:
            const_data[key][i, :, pixel_range[0]:pixel_range[2],
                            pixel_range[1]:pixel_range[3]] = fit_data[i][key]

const_data['slopesFF'] = np.full((nmods, shape[0], 512, 128), np.nan)
labels['slopesFF'] = f'slopesFF [ADU/keV], $\gamma$={photon_energy} [keV]'
for i in range(nmods):
    if i in slopesFF:
        const_data['slopesFF'][i, :, pixel_range[0]:pixel_range[2],
                               pixel_range[1]:pixel_range[3]] = slopesFF[i]
```
%% Cell type:markdown id: tags:
## Summary across pixels ##
%% Cell type:code id: tags:
``` python
for key in const_data.keys():
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    if key == 'mask':
        data = np.nanmean(const_data[key] > 0, axis=1)
        vmin, vmax = (0, 1)
    else:
        data = np.nanmean(const_data[key], axis=1)
        vmin, vmax = get_range(data, 5)
    ax = geom.plot_data_fast(data, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
    _ = ax.set_title(labels[key])
```
%% Cell type:markdown id: tags:
## Summary histograms ##
%% Cell type:code id: tags:
``` python
sel = (const_data['mask'] == 0)

module_mean = np.nanmean(const_data["gain"], axis=(1, 2, 3))
module_mean = module_mean[:, np.newaxis, np.newaxis, np.newaxis]

dsets = {'d01 [ADU]': const_data["g1mean"] - const_data["g0mean"],
         'gain [ADU/keV]': const_data["gain"],
         'gain relative to module mean': const_data["gain"] / module_mean,
         }

fig = plt.figure(figsize=(16, 5))
for i, (par, data) in enumerate(dsets.items()):
    ax = fig.add_subplot(1, 3, i + 1)
    plt_range = np.nanmin(data), np.nanmax(data)
    if 'd01' in par:
        ax.axvline(d0_lim[0])
        ax.axvline(d0_lim[1])
    elif 'rel' in par:
        ax.axvline(gain_lim[0])
        ax.axvline(gain_lim[1])
    num_bins = 100
    _ = ax.hist(data.flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='red',
                label='all fits',)
    a = ax.hist(data[sel].flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='g',
                label='good fits only',
                )
    ax.set_xlabel(f"{par}")
    ax.legend()
```
%% Cell type:markdown id: tags:
## Summary across cells ##
Good pixels only.
%% Cell type:code id: tags:
``` python
for key in const_data.keys():
    data = np.copy(const_data[key])
    if key == 'mask':
        data = data > 0
    else:
        data[const_data['mask'] > 0] = np.nan

    d = []
    for i in range(nmods):
        d.append({'x': np.arange(data[i].shape[0]),
                  'y': np.nanmean(data[i], axis=(1, 2)),
                  'drawstyle': 'steps-pre',
                  'label': f'{i}',
                  'linewidth': 2,
                  'linestyle': '--' if i > 7 else '-'
                  })

    fig = plt.figure(figsize=(15, 6))
    ax = fig.add_subplot(111)

    _ = simplePlot(d, xrange=(-12, 510),
                   x_label='Memory Cell ID',
                   y_label=labels[key],
                   use_axis=ax,
                   legend='top-left-frame-ncol8',)
    ylim = ax.get_ylim()
    ax.set_ylim(ylim[0], ylim[1] + np.abs(ylim[1] - ylim[0]) * 0.2)
    ax.grid()
```
%% Cell type:markdown id: tags:
## Summary table ##
%% Cell type:code id: tags:
``` python
table = []
for i in modules:
    table.append((i,
                  f"{np.nanmean(slopesFF[i]):0.1f} +- {np.nanstd(slopesFF[i]):0.2f}",
                  f"{np.nanmean(bpmask[i] > 0) * 100:0.1f} ({np.nansum(bpmask[i] > 0)})"
                  ))

all_SFF = np.array([list(sff) for sff in slopesFF.values()])
all_MSK = np.array([list(msk) for msk in bpmask.values()])

table.append(('overall',
              f"{np.nanmean(all_SFF):0.1f} +- {np.nanstd(all_SFF):0.2f}",
              f"{np.nanmean(all_MSK > 0) * 100:0.1f} ({np.nansum(all_MSK > 0)})"
              ))

md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
                                     headers=["Module", "Gain [ADU/keV]", "Bad pixels [%(Count)]"])))
```
%% Cell type:markdown id: tags:
## Performance plots
%% Cell type:code id: tags:
``` python
def get_trains_data(run_folder, source, include, tid=None):
    """Load a single train for all modules.

    :param run_folder: path to the folder with data
    :param source: data source to be loaded
    :param include: inset of file names to be considered
    :param tid: train ID to be loaded; the first train is used if None is given
    """
    run_data = RunDirectory(run_folder, include)
    if tid:
        tid, data = run_data.select('*/DET/*', source).train_from_id(tid)
        return tid, stack_detector_data(data, source, modules=nmods)
    else:
        for tid, data in run_data.select('*/DET/*', source).trains(require_all=True):
            return tid, stack_detector_data(data, source, modules=nmods)
    return None, None

include = '*S00000*'
tid, orig = get_trains_data(f'{proc_folder}/r{run:04d}/', 'image.data', include)
orig = orig[cell_range[0]:cell_range[1], ...]
```
%% Cell type:code id: tags:
``` python
# FIXME: mask bad pixels from median
# mask = const_data['BadPixelsFF']

corrections = const_data['slopesFF']  # shape: (16, shape[0], 512, 128), with shape[0] = cell_range[1] - cell_range[0]
corrections = np.moveaxis(corrections, 1, 0)  # shape: (shape[0], 16, 512, 128)
rel_corr = corrections / np.nanmedian(corrections)
corrected = orig / rel_corr
```
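%% Cell type:markdown id: tags:
Restating the cell above as a formula: per memory cell $c$ and pixel $(x,y)$, with $S$ the SlopesFF map,
$$ I^{\mathrm{corr}}_{c,x,y} = I^{\mathrm{raw}}_{c,x,y} \cdot \frac{\mathrm{median}(S)}{S_{c,x,y}} $$
so the relative gain is divided out while the overall ADU scale of the data is preserved.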
%% Cell type:markdown id: tags:
### Mean value not corrected (train 0)
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,20))
ax = fig.add_subplot(111)
odata = np.nanmean(orig, axis=0)
vmin, vmax = get_range(odata, 5)
ax = geom.plot_data_fast(odata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20,20))
_ = ax.set_title("Original data, mean across one train")
```
%% Cell type:markdown id: tags:
### Mean value corrected (train 0)
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,20))
ax = fig.add_subplot(111)
cdata = np.nanmean(corrected, axis=0)
ax = geom.plot_data_fast(cdata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20,20))
_ = ax.set_title("Corrected data, mean across one train")
```
%% Cell type:markdown id: tags:
### Laplace transform of mean image
%% Cell type:code id: tags:
``` python
from scipy.ndimage import laplace

cmax = np.max(cdata)
omax = np.max(odata)
clap = np.zeros_like(cdata)
olap = np.zeros_like(odata)
for i in range(nmods):
    clap[i] = np.abs(laplace(cdata[i].astype(float) / cmax))
    olap[i] = np.abs(laplace(odata[i].astype(float) / omax))

fig = plt.figure(figsize=(20, 10))
vmin, vmax = get_range(olap, 2)

ax = fig.add_subplot(121)
ax = geom.plot_data_fast(olap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
_ = ax.set_title("Laplace (original data)")

ax = fig.add_subplot(122)
ax = geom.plot_data_fast(clap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
_ = ax.set_title("Laplace (gain corrected data)")
```
%% Cell type:markdown id: tags:
### Histogram of corrected and uncorrected spectrum (train 0)
%% Cell type:code id: tags:
``` python
######################################
# FIT PEAKS
######################################
x_range = [peak_range[0][0], peak_range[-1][-1]]
nb = x_range[1] - x_range[0] + 1
sel = ~np.isnan(corrected)

fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
y, xe, _ = ax.hist(corrected[sel].flatten(), bins=nb, range=x_range, label='corrected', alpha=0.5)

# get the bin centers from the bin edges
xc = xe[:-1] + (xe[1] - xe[0]) / 2
pars, _ = get_starting_parameters(xc, y, peak_range, 4)
minuit = fit_n_peaks(xc, y, pars, x_range, fix_d01=False, sigma_limit=1)
pc = minuit.args
resc = minuit.fitarg
yfc = gaussian_sum(xc, 4, *pc)
plt.plot(xc, yfc, label='corrected fit')

y, _, _ = ax.hist(orig[sel].flatten(), bins=nb, range=x_range, label='original', alpha=0.5)
pars, _ = get_starting_parameters(xc, y, peak_range, 4)
minuit = fit_n_peaks(xc, y, pars, x_range, fix_d01=False, sigma_limit=1)
po = minuit.args
reso = minuit.fitarg
yfo = gaussian_sum(xc, 4, *po)
plt.plot(xc, yfo, label='original fit')

plt.title("Signal spectrum, first train")
plt.xlabel('[ADU]')
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:
### Summary table ###
%% Cell type:code id: tags:
``` python
from scipy.stats import median_absolute_deviation as mad

table = []
headers = ["Parameter",
           "Value (original data)",
           "Value (gain corrected data)",
           "Relative difference"]

for i in range(4):
    table.append((f"Sigma{i} (ADU)",
                  f"{reso[f'g{i}sigma']:0.2f} ",
                  f"{resc[f'g{i}sigma']:0.2f} ",
                  f"{(reso[f'g{i}sigma'] - resc[f'g{i}sigma']) / reso[f'g{i}sigma']:0.2f} ",
                  ))

ovar = np.std(odata)
cvar = np.std(cdata)
table.append(("RMS of mean image",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

omin, omax = get_range(odata, 5)
cmin, cmax = get_range(cdata, 5)
ovar = np.std(odata[(odata > omin) & (odata < omax)])
cvar = np.std(cdata[(cdata > cmin) & (cdata < cmax)])
table.append(("RMS of mean image (mu+-5sigma)",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

ovar = mad(odata.flatten())
cvar = mad(cdata.flatten())
table.append(("MAD of mean image",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

ovar = np.median(olap)
cvar = np.median(clap)
table.append(("Median Laplace",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

md = display(Latex(tabulate.tabulate(table,
                                     tablefmt='latex',
                                     headers=headers)))
```
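%% Cell type:markdown id: tags:
A portability note on the cell above: `scipy.stats.median_absolute_deviation` was deprecated in SciPy 1.5 and removed in later releases. On newer environments an equivalent import would be (a sketch; `scale='normal'` reproduces the old default 1.4826 consistency factor):
%% Cell type:code id: tags:
``` python
from scipy.stats import median_abs_deviation

def mad(a):
    # Matches the behaviour of the deprecated median_absolute_deviation default.
    return median_abs_deviation(a, scale='normal')
```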
......
%% Cell type:markdown id: tags:
# Jungfrau Dark Image Characterization #
Author: European XFEL Detector Group, Version: 2.0
Analyzes Jungfrau dark image data to deduce offset, noise and resulting bad pixel maps
%% Cell type:code id: tags:
``` python
in_folder = '/gpfs/exfel/exp/SPB/202130/p900204/raw/' # folder under which runs are located, required
out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/remove' # path to place reports at, required
run_high = 141 # run number for G0 dark run, required
run_med = 142 # run number for G1 dark run, required
run_low = 143 # run number for G2 dark run, required
# Parameters used to access raw data.
karabo_da = ['JNGFR01', 'JNGFR02', 'JNGFR03', 'JNGFR04', 'JNGFR05', 'JNGFR06', 'JNGFR07', 'JNGFR08'] # list of data aggregators, which correspond to different JF modules
karabo_id = 'SPB_IRDA_JF4M' # karabo_id (detector identifier) prefix of Jungfrau detector to process.
karabo_id_control = '' # if control is on a different ID; leave empty if it is the same as karabo_id
receiver_template = 'JNGFR{:02}' # inset for receiver devices
instrument_source_template = '{}/DET/{}:daqOutput' # template for instrument source name (filled with karabo_id & receiver_id). e.g. 'SPB_IRDA_JF4M/DET/JNGFR01:daqOutput'
ctrl_source_template = '{}/DET/CONTROL' # template for control source name (filled with karabo_id_control)
# Parameters for calibration database and storing constants.
use_dir_creation_date = True # use dir creation date
cal_db_interface = 'tcp://max-exfl016:8016' # calibrate db interface to connect to
cal_db_timeout = 300000 # timeout on caldb requests
local_output = True # output constants locally
db_output = False # output constants to database
# Parameters affecting creating dark calibration constants.
badpixel_threshold_sigma = 5. # bad pixels defined by values outside n times this std from median
offset_abs_threshold_low = [1000, 10000, 10000] # absolute bad pixel threshold in terms of offset, lower values
offset_abs_threshold_high = [8000, 15000, 15000] # absolute bad pixel threshold in terms of offset, upper values
max_trains = 0 # Maximum trains to process darks. Set to 0 to process all available train images.
min_trains = 1 # Minimum number of trains that should be available to process dark constants. Default 1.
manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values
time_limits = 0.025 # to find calibration constants later on, the integration time is allowed to vary by this amount (in us)
# Parameters to be used for injecting dark calibration constants.
integration_time = 1000 # integration time in us, will be overwritten by value in file
gain_setting = 0 # 0 for dynamic, forceswitchg1, forceswitchg2, 1 for dynamichg0, fixgain1, fixgain2. Will be overwritten by value in file
gain_mode = 0 # 1 if medium and low runs are fixgain1 and fixgain2, otherwise 0. It will be overwritten by value in file, if manual_slow_data
bias_voltage = 90 # sensor bias voltage in V, will be overwritten by value in file
memory_cells = 16 # number of memory cells
# TODO: this is used for only Warning check at AGIPD dark.
# Need to rethink if it makes sense to use it here as well.
operation_mode = 'ADAPTIVE_GAIN' # Detector operation mode, optional
# TODO: Remove
db_module = [""] # ID of module in calibration database. TODO: remove from calibration_configurations.
```
%% Cell type:code id: tags:
``` python
import glob
import os
import warnings
from pathlib import Path
warnings.filterwarnings('ignore')
import matplotlib
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import pasha as psh
from IPython.display import Markdown, display
from extra_data import RunDirectory
matplotlib.use('agg')
%matplotlib inline
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.histogram import histPlot
from cal_tools import jungfraulib, step_timing
from cal_tools.ana_tools import save_dict_to_hdf5
from cal_tools.enums import BadPixels, JungfrauSettings
from cal_tools.tools import (
get_dir_creation_date,
get_pdu_from_db,
get_random_db_interface,
get_report,
save_const_to_h5,
send_to_db,
)
from iCalibrationDB import Conditions, Constants
```
%% Cell type:code id: tags:
``` python
# Constants relevant for the analysis
run_nums = [run_high, run_med, run_low]  # run number for G0/HG0, G1, G2
sensor_size = (1024, 512)
gains = [0, 1, 2]

fixed_settings = [
    JungfrauSettings.FIX_GAIN_1.value, JungfrauSettings.FIX_GAIN_2.value]
dynamic_settings = [
    JungfrauSettings.FORCE_SWITCH_HG1.value, JungfrauSettings.FORCE_SWITCH_HG2.value]

creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run_high)
    print(f"Using {creation_time} as creation time")

os.makedirs(out_folder, exist_ok=True)

cal_db_interface = get_random_db_interface(cal_db_interface)
print(f'Calibration database interface: {cal_db_interface}')

if karabo_id_control == "":
    karabo_id_control = karabo_id
```
%% Cell type:code id: tags:
``` python
proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]
file_loc = f"proposal:{proposal} runs:{run_high} {run_med} {run_low}"
report = get_report(out_folder)
step_timer = step_timing.StepTimer()
```
%% Cell type:markdown id: tags:
## Reading control data
%% Cell type:code id: tags:
``` python
step_timer.start()
gain_runs = dict()
med_low_settings = []
ctrl_src = ctrl_source_template.format(karabo_id_control)

for gain, run_n in enumerate(run_nums):
    run_dc = RunDirectory(f"{in_folder}/r{run_n:04d}/")
    gain_runs[run_n] = [gain, run_dc]
    ctrl_data = jungfraulib.JungfrauCtrl(run_dc, ctrl_src)
    run_settings = ctrl_data.run_settings.value if ctrl_data.run_settings else ctrl_data.run_settings  # noqa
    # Read control data for the high gain run only.
    if run_n == run_high:
        run_mcells, sc_start = ctrl_data.get_memory_cells()

        if not manual_slow_data:
            integration_time = ctrl_data.get_integration_time()
            bias_voltage = ctrl_data.get_bias_voltage()
            gain_setting = ctrl_data.get_gain_setting()
            print(f"Gain setting is {gain_setting} ({run_settings})")
            print(f"Integration time is {integration_time} us")
            print(f"Bias voltage is {bias_voltage} V")

        if run_mcells == 1:
            memory_cells = 1
            print('Dark runs in single cell mode, '
                  f'storage cell start: {sc_start:02d}')
        else:
            memory_cells = 16
            print('Dark runs in burst mode, '
                  f'storage cell start: {sc_start:02d}')
    else:
        gain_mode = ctrl_data.get_gain_mode()
        med_low_settings.append(run_settings)

# A transparent workaround for old raw data with wrong/missing medium and low settings
if med_low_settings == [None, None]:
    print("WARNING: run.settings is not stored in the data to read. "
          f"Hence assuming gain_mode = {gain_mode} for adaptive old data.")
elif med_low_settings == ["dynamicgain", "forceswitchg1"]:
    print(f"WARNING: run.settings for medium and low gain runs are wrong {med_low_settings}. "
          f"This is an expected bug for old raw data. Setting gain_mode to {gain_mode}.")
# Validate that med_low_settings is not a mix of adaptive and fixed settings.
elif not (sorted(med_low_settings) in [fixed_settings, dynamic_settings]):  # noqa
    raise ValueError(
        "Medium and low run settings are not as expected. "
        f"Either {dynamic_settings} or {fixed_settings} are expected.\n"
        f"Got {sorted(med_low_settings)} for both runs, respectively.")

print(f"Gain mode is {gain_mode} ({med_low_settings})")

step_timer.done_step('Reading control data.')
```
%% Cell type:code id: tags:
``` python
# Use only the high gain threshold for all gains in case of fixed_gain.
if gain_mode:  # fixed_gain
    offset_abs_threshold = [[offset_abs_threshold_low[0]] * 3, [offset_abs_threshold_high[0]] * 3]
else:
    offset_abs_threshold = [offset_abs_threshold_low, offset_abs_threshold_high]
```
%% Cell type:code id: tags:
``` python
context = psh.context.ThreadContext(num_workers=multiprocessing.cpu_count())
```
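%% Cell type:markdown id: tags:
pasha drives the per-cell reduction below: output arrays are allocated through the context and shared by all workers, and each kernel invocation receives `(worker_id, array_index, value)`. A standalone toy of the same pattern (not detector code):
%% Cell type:code id: tags:
``` python
import numpy as np
import pasha as psh

ctx = psh.context.ThreadContext(num_workers=4)
squares = ctx.alloc(shape=(16,), fill=0)

def fill(worker_id, array_index, value):
    # Each call owns exactly one output element, so no locking is needed.
    squares[array_index] = value ** 2

ctx.map(fill, range(16))
print(squares)
```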
%% Cell type:code id: tags:
``` python
"""
All jungfrau runs are taken through one acquisition, except for the forceswitch runs.
While taking non-fixed dark runs, a procedure of multiple acquisitions is used to switch the storage cell indices.
This is done for medium and low gain dark dynamic runs, only [forceswitchg1, forceswitchg2]:
Switching the cell indices in burst mode is a work around for hardware procedure
deficiency that produces wrong data for dark runs except for the first storage cell.
This is why multiple acquisitions are taken to switch the used storage cells and
acquire data through two cells for each of the 16 cells instead of acquiring darks through all 16 cells.
"""
print(f"Maximum trains to process is set to {max_trains}")
noise_map = dict()
offset_map = dict()
bad_pixels_map = dict()
for mod in karabo_da:
step_timer.start()
instrument_src = instrument_source_template.format(
karabo_id, receiver_template.format(int(mod[-2:])))
print(f"\n- Instrument data path for {mod} is {instrument_src}.")
offset_map[mod] = context.alloc(shape=(sensor_size+(memory_cells, 3)), fill=0)
noise_map[mod] = context.alloc(like=offset_map[mod], fill=0)
bad_pixels_map[mod] = context.alloc(like=offset_map[mod], dtype=np.uint32, fill=0)
for run_n, [gain, run_dc] in gain_runs.items():
def process_cell(worker_id, array_index, cell_number):
cell_slice_idx = acelltable == cell_number
thiscell = images[..., cell_slice_idx]
offset_map[mod][..., cell_number, gain] = np.mean(thiscell, axis=2)
noise_map[mod][..., cell_number, gain] = np.std(thiscell, axis=2)
# Check if there are wrong bad gain values.
# Indicate pixels with wrong gain value across all trains for each cell.
bad_pixels_map[mod][
np.average(gain_vals[..., cell_slice_idx], axis=2) != raw_g] |= BadPixels.WRONG_GAIN_VALUE.value
print(f"Gain stage {gain}, run {run_n}")
# load shape of data for memory cells, and detector size (imgs, cells, x, y)
n_imgs = run_dc[instrument_src, "data.adc"].shape[0]
# load number of data available, including trains with empty data.
n_trains = len(run_dc.train_ids)
instr_dc = run_dc.select(instrument_src, require_all=True)
empty_trains = n_trains - n_imgs
if empty_trains != 0:
print(
f"\tWARNING: {mod} has {empty_trains} trains with empty data out of {n_trains} trains at " # noqa
f"{Path(run_dc[instrument_src, 'data.adc'].files[0].filename).parent}.")
if max_trains > 0:
n_imgs = min(n_imgs, max_trains)
print(f"Processing {n_imgs} images.")
# Select only requested number of images to process darks.
instr_dc = instr_dc.select_trains(np.s_[:n_imgs])
if n_imgs < min_trains:
raise ValueError(
f"Less than {min_trains} trains are available in RAW data."
" Not enough data to process darks.")
images = np.transpose(
instr_dc[instrument_src, "data.adc"].ndarray().astype(np.float64), (3, 2, 1, 0))
instr_dc[instrument_src, "data.adc"].ndarray(), (3, 2, 1, 0))
acelltable = np.transpose(instr_dc[instrument_src, "data.memoryCell"].ndarray())
gain_vals = np.transpose(
instr_dc[instrument_src, "data.gain"].ndarray(), (3, 2, 1, 0))
# define gain value as saved in raw gain map
raw_g = 3 if gain == 2 else gain
if memory_cells == 1:
acelltable -= sc_start
# Only for dynamic medium and low gain runs [forceswitchg1, forceswitchg2] in burst mode.
if gain_mode == 0 and gain > 0 and memory_cells == 16:
# 255 similar to the receiver which uses the 255
# value to indicate a cell without an image.
# image shape for forceswitchg1 and forceswitchg2 = (1024, 512, 2, trains)
# compared to expected shape of (1024, 512, 16, trains) for high gain run.
acelltable[1:] = 255
# Calculate offset and noise maps
context.map(process_cell, range(memory_cells))
step_timer.done_step(f'Creating Offset and noise constants for a module.')
```
%% Cell type:markdown id: tags:
## Offset and Noise Maps ##
Below, offset and noise maps for the high ($g_0$) gain stage are shown, alongside the distribution of these values. One expects block-like structures mapping to the ASICs of the detector.
%% Cell type:code id: tags:
``` python
g_name = ['G0', 'G1', 'G2']
g_range = [(0, 8000), (8000, 16000), (8000, 16000)]
n_range = [(0., 50.), (0., 50.), (0., 50.)]
unit = '[ADCu]'
```
%% Cell type:code id: tags:
``` python
# TODO: Fix plot arrangement and speed for Jungfrau burst mode.
step_timer.start()
for mod in karabo_da:
    for g_idx in gains:
        for cell in range(0, memory_cells):
            f_o0 = heatmapPlot(
                np.swapaxes(offset_map[mod][..., cell, g_idx], 0, 1),
                y_label="Row",
                x_label="Column",
                lut_label=unit,
                aspect=1.,
                vmin=g_range[g_idx][0],
                vmax=g_range[g_idx][1],
                title=f'Pedestal {g_name[g_idx]} - Cell {cell:02d} - Module {mod}')

            fo0, ax_o0 = plt.subplots()
            res_o0 = histPlot(
                ax_o0, offset_map[mod][..., cell, g_idx],
                bins=800,
                range=g_range[g_idx],
                facecolor='b',
                histotype='stepfilled',
            )
            ax_o0.tick_params(axis='both', which='major', labelsize=15)
            ax_o0.set_title(
                f'Module pedestal distribution - Cell {cell:02d} - Module {mod}',
                fontsize=15)
            ax_o0.set_xlabel(f'Pedestal {g_name[g_idx]} {unit}', fontsize=15)
            ax_o0.set_yscale('log')

            f_n0 = heatmapPlot(
                np.swapaxes(noise_map[mod][..., cell, g_idx], 0, 1),
                y_label="Row",
                x_label="Column",
                lut_label=unit,
                aspect=1.,
                vmin=n_range[g_idx][0],
                vmax=n_range[g_idx][1],
                title=f"RMS noise {g_name[g_idx]} - Cell {cell:02d} - Module {mod}",
            )

            fn0, ax_n0 = plt.subplots()
            res_n0 = histPlot(
                ax_n0,
                noise_map[mod][..., cell, g_idx],
                bins=100,
                range=n_range[g_idx],
                facecolor='b',
                histotype='stepfilled',
            )
            ax_n0.tick_params(axis='both', which='major', labelsize=15)
            ax_n0.set_title(
                f'Module noise distribution - Cell {cell:02d} - Module {mod}',
                fontsize=15)
            ax_n0.set_xlabel(
                f'RMS noise {g_name[g_idx]} ' + unit, fontsize=15)

            plt.show()
step_timer.done_step('Plotting offset and noise maps.')
```
%% Cell type:markdown id: tags:
## Bad Pixel Map ##
The bad pixel map is deduced by comparing offset and noise of each pixel ($v_i$) and each gain ($g$) against the median value for that gain stage:
$$
v_i > \mathrm{median}(v_{k,g}) + n \sigma_{v_{k,g}}
$$
or
$$
v_i < \mathrm{median}(v_{k,g}) - n \sigma_{v_{k,g}}
$$
Values are encoded in a 32 bit mask, where for the dark image deduced bad pixels the following non-zero entries are relevant:
%% Cell type:code id: tags:
``` python
def print_bp_entry(bp):
    print("{:<30s} {:032b} -> {}".format(bp.name, bp.value, int(bp.value)))

print_bp_entry(BadPixels.OFFSET_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.NOISE_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.OFFSET_NOISE_EVAL_ERROR)
print_bp_entry(BadPixels.WRONG_GAIN_VALUE)

def eval_bpidx(d):
    mdn = np.nanmedian(d, axis=(0, 1))[None, None, :, :]
    std = np.nanstd(d, axis=(0, 1))[None, None, :, :]
    idx = (d > badpixel_threshold_sigma * std + mdn) | (d < (-badpixel_threshold_sigma) * std + mdn)
    return idx
```
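%% Cell type:markdown id: tags:
As a toy check of `eval_bpidx` (illustrative values only): plant one outlier in a synthetic `(x, y, cell, gain)` array and confirm it is flagged.
%% Cell type:code id: tags:
``` python
rng = np.random.default_rng(0)
toy = rng.normal(1000, 5, size=(64, 64, 1, 1))  # axes: (x, y, cell, gain)
toy[0, 0, 0, 0] = 2000.  # one clear outlier
assert eval_bpidx(toy)[0, 0, 0, 0]  # flagged: beyond median +/- n*sigma
```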
%% Cell type:code id: tags:
``` python
step_timer.start()
for mod in karabo_da:
    display(Markdown(f"### Badpixels for module {mod}:"))
    offset_abs_threshold = np.array(offset_abs_threshold)

    bad_pixels_map[mod][eval_bpidx(offset_map[mod])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value
    bad_pixels_map[mod][~np.isfinite(offset_map[mod])] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
    bad_pixels_map[mod][eval_bpidx(noise_map[mod])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value
    bad_pixels_map[mod][~np.isfinite(noise_map[mod])] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
    bad_pixels_map[mod][(offset_map[mod] < offset_abs_threshold[0][None, None, None, :]) | (offset_map[mod] > offset_abs_threshold[1][None, None, None, :])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value  # noqa

    for g_idx in gains:
        for cell in range(memory_cells):
            bad_pixels = bad_pixels_map[mod][:, :, cell, g_idx]
            fn_0 = heatmapPlot(
                np.swapaxes(bad_pixels, 0, 1),
                y_label="Row",
                x_label="Column",
                lut_label=f"Badpixels {g_name[g_idx]} [ADCu]",
                aspect=1.,
                vmin=0, vmax=5,
                title=f'G{g_idx} Bad pixel map - Cell {cell:02d} - Module {mod}')
step_timer.done_step('Creating bad pixels constant and plotting it for a module.')
```
%% Cell type:code id: tags:
``` python
# set the operating condition
condition = Conditions.Dark.jungfrau(
memory_cells=memory_cells,
bias_voltage=bias_voltage,
integration_time=integration_time,
gain_setting=gain_setting,
gain_mode=gain_mode,
)
db_modules = get_pdu_from_db(
karabo_id=karabo_id,
karabo_da=karabo_da,
constant=Constants.jungfrau.Offset(),
condition=condition,
cal_db_interface=cal_db_interface,
snapshot_at=creation_time)
```
%% Cell type:markdown id: tags:
## Inject and save calibration constants
%% Cell type:code id: tags:
``` python
step_timer.start()
for mod, db_mod in zip(karabo_da, db_modules):
    constants = {
        'Offset': np.moveaxis(offset_map[mod], 0, 1),
        'Noise': np.moveaxis(noise_map[mod], 0, 1),
        'BadPixelsDark': np.moveaxis(bad_pixels_map[mod], 0, 1),
    }

    md = None

    for key, const_data in constants.items():
        const = getattr(Constants.jungfrau, key)()
        const.data = const_data

        for parm in condition.parameters:
            if parm.name == "Integration Time":
                parm.lower_deviation = time_limits
                parm.upper_deviation = time_limits

        if db_output:
            md = send_to_db(
                db_module=db_mod,
                karabo_id=karabo_id,
                constant=const,
                condition=condition,
                file_loc=file_loc,
                report_path=report,
                cal_db_interface=cal_db_interface,
                creation_time=creation_time,
                timeout=cal_db_timeout,
            )

        if local_output:
            md = save_const_to_h5(
                db_module=db_mod,
                karabo_id=karabo_id,
                constant=const,
                condition=condition,
                data=const.data,
                file_loc=file_loc,
                report=report,
                creation_time=creation_time,
                out_folder=out_folder,
            )
            print(f"Calibration constant {key} is stored locally at {out_folder}.\n")

print("Constants parameter conditions are:\n")
print(
    f"• Bias voltage: {bias_voltage}\n"
    f"• Memory cells: {memory_cells}\n"
    f"• Integration time: {integration_time}\n"
    f"• Gain setting: {gain_setting}\n"
    f"• Gain mode: {gain_mode}\n"
    f"• Creation time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\n")  # noqa
step_timer.done_step("Injecting constants.")
```
%% Cell type:code id: tags:
``` python
print(f"Total processing time {step_timer.timespan():.01f} s")
step_timer.print_summary()
```
......
%% Cell type:code id: tags:
``` python
# Author: European XFEL Detector Group, Version: 1.0
# Summary of the processed dark calibration constants and a comparison with previously injected constants.
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/fixed_gain/SPB_summary_fix2" # path to output to, required
karabo_id = "SPB_DET_AGIPD1M-1" # detector instance
gain_names = ['High gain', 'Medium gain', 'Low gain'] # a list of gain names to be used in plotting
threshold_names = ['HG-MG threshold', 'MG-LG threshold'] # a list of threshold names to be used in plotting
```
%% Cell type:code id: tags:
``` python
import copy
import os
import warnings
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
warnings.filterwarnings('ignore')
import glob
import h5py
import matplotlib
import numpy as np
import yaml
from IPython.display import Latex, Markdown, display
matplotlib.use("agg")
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
import matplotlib.pyplot as plt
%matplotlib inline
import extra_geom
import tabulate
from cal_tools.ana_tools import get_range
from cal_tools.plotting import show_processed_modules
from cal_tools.tools import CalibrationMetadata, module_index_to_qm
from iCalibrationDB import Detectors
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.simpleplot import simplePlot
```
%% Cell type:code id: tags:
``` python
# Note: this notebook assumes that local_output was set to True in the preceding characterization notebook
```
%% Cell type:code id: tags:
``` python
if "AGIPD" in karabo_id:
if "SPB" in karabo_id:
dinstance = "AGIPD1M1"
nmods = 16
elif "MID" in karabo_id:
dinstance = "AGIPD1M2"
nmods = 16
elif "HED" in karabo_id:
dinstance = "AGIPD500K"
nmods = 8
display(Markdown("""
# Summary of AGIPD dark characterization #
The following report shows a set of dark images taken with the AGIPD detector to deduce detector offsets, noise, bad-pixel maps and gain thresholds. All four types of constants are evaluated per-pixel and per-memory cell.
**The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$).
**The noise** $N$ is the standard deviation $\sigma$ of the dark signal.
$$ O_{x,y,c} = M(Ds)_{t} ,\,\,\,\,\,\, N_{x,y,c} = \sigma(Ds)_{t}$$
**The bad pixel** mask is encoded as a bit mask.
**"OFFSET_OUT_OF_THRESHOLD":**
Offset outside of bounds:
$$M(O)_{x,y} - \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} < O < M(O)_{x,y} + \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} $$
or offset outside of hard limits
$$ \mathrm{thresholds\_offset\_hard}_\mathrm{low} < O < \mathrm{thresholds\_offset\_hard}_\mathrm{high} $$
**"NOISE_OUT_OF_THRESHOLD":**
Noise outside of bounds:
$$M(N)_{x,y} - \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} < N < M(N)_{x,y} + \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} $$
or noise outside of hard limits
$$\mathrm{thresholds\_noise\_hard}_\mathrm{low} < N < \mathrm{thresholds\_noise\_hard}_\mathrm{high} $$
**"OFFSET_NOISE_EVAL_ERROR":**
Offset and noise both evaluate to non-$nan$ values; the flag is set when this evaluation fails.
Values: $\mathrm{thresholds\_offset\_sigma}$, $\mathrm{thresholds\_offset\_hard}$, $\mathrm{thresholds\_noise\_sigma}$, $\mathrm{thresholds\_noise\_hard}$ are given as parameters.
"**\"GAIN_THRESHOLDING_ERROR\":**
Bad gain separated pixels with sigma separation less than gain_separation_sigma_threshold
$$ sigma\_separation = \\frac{\mathrm{gain\_offset} - \mathrm{previous\_gain\_offset}}{\sqrt{\mathrm{gain\_offset_{std}}^\mathrm{2} + \mathrm{previuos\_gain\_offset_{std}}^\mathrm{2}}}$$
$$ Bad\_separation = sigma\_separation < \mathrm{gain\_separation\_sigma\_threshold} $$
"""))
elif "LPD" in karabo_id:
dinstance = "LPD1M1"
nmods = 16
display(Markdown("""
# Summary of LPD dark characterization #
The following report shows a set of dark images taken with the LPD detector to deduce detector offsets, noise, bad-pixel maps. All three types of constants are evaluated per-pixel and per-memory cell.
**The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$).
**The noise** $N$ is the standard deviation $\sigma$ of the dark signal.
$$ O_{x,y,c} = M(Ds)_{t} ,\,\,\,\,\,\, N_{x,y,c} = \sigma(Ds)_{t}$$
**The bad pixel** mask is encoded as a bit mask.
**"OFFSET_OUT_OF_THRESHOLD":**
Offset outside of bounds:
$$M(O)_{x,y} - \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} < O < M(O)_{x,y} + \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} $$
or offset outside of hard limits
$$ \mathrm{thresholds\_offset\_hard}_\mathrm{low} < O < \mathrm{thresholds\_offset\_hard}_\mathrm{high} $$
**"NOISE_OUT_OF_THRESHOLD":**
Noise outside of bounds:
$$M(N)_{x,y} - \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} < N < M(N)_{x,y} + \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} $$
or noise outside of hard limits
$$\mathrm{thresholds\_noise\_hard}_\mathrm{low} < N < \mathrm{thresholds\_noise\_hard}_\mathrm{high} $$
**"OFFSET_NOISE_EVAL_ERROR":**
Offset and noise both evaluate to non-$nan$ values; the flag is set when this evaluation fails.
Values: $\mathrm{thresholds\_offset\_sigma}$, $\mathrm{thresholds\_offset\_hard}$, $\mathrm{thresholds\_noise\_sigma}$, $\mathrm{thresholds\_noise\_hard}$ are given as parameters.
"""))
elif "DSSC" in karabo_id:
dinstance = "DSSC1M1"
nmods = 16
display(Markdown("""
# Summary of DSSC dark characterization #
"""))
```
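%% Cell type:markdown id: tags:
As a minimal sketch of the definitions above (not part of the report pipeline), the cell below computes an offset, a noise map and a bit-encoded bad pixel mask for a synthetic dark stack. The array shape, the flag values and the sigma threshold are assumptions for this illustration only; the production constants and flags come from cal_tools and the characterization notebooks.
%% Cell type:code id: tags:
``` python
import numpy as np

# Synthetic dark data with shape (trains, memory cells, x, y); sizes are arbitrary.
rng = np.random.default_rng(seed=0)
ds = rng.normal(loc=5000.0, scale=18.0, size=(100, 4, 16, 16))

offset = np.median(ds, axis=0)  # O_{x,y,c} = M(Ds)_t
noise = ds.std(axis=0)          # N_{x,y,c} = sigma(Ds)_t

# Illustrative flag values; the real encoding lives in cal_tools' BadPixels enum.
OFFSET_OUT_OF_THRESHOLD = 1 << 0
NOISE_OUT_OF_THRESHOLD = 1 << 1

thresholds_offset_sigma = 3.0  # assumed parameter value
flagged = np.abs(offset - np.median(offset)) > thresholds_offset_sigma * offset.std()

bad_pixels = np.zeros(offset.shape, dtype=np.uint32)
bad_pixels[flagged] |= OFFSET_OUT_OF_THRESHOLD  # flags combine bitwise

print(offset.shape, noise.shape, int(np.count_nonzero(bad_pixels)))
```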
%% Cell type:code id: tags:
``` python
out_folder = Path(out_folder)
metadata = CalibrationMetadata(out_folder)
mod_mapping = metadata.setdefault("modules-mapping", {})
old_constant_metadata = {}
for fn in out_folder.glob("module_metadata_*.yml"):
with fn.open("r") as fd:
fdict = yaml.safe_load(fd)
module = fdict["module"]
mod_mapping[module] = fdict["pdu"]
old_constant_metadata[module] = fdict["old-constants"]
fn.unlink()
metadata.save()
```
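%% Cell type:markdown id: tags:
For reference, a sketch of the structure each module_metadata_*.yml file is assumed to have, based only on the keys read in the cell above; every concrete value below is made up for illustration.
%% Cell type:code id: tags:
``` python
# Hypothetical content of one module_metadata_*.yml file after yaml.safe_load.
example_fdict = {
    "module": "Q1M1",  # module name in the form produced by module_index_to_qm
    "pdu": "EXAMPLE_PDU_NAME",  # hypothetical physical detector unit name
    "old-constants": {
        "Offset": {
            "filepath": "/path/to/previously/injected/constant.h5",  # hypothetical
            "h5path": "internal/h5/group",  # hypothetical
        },
    },
}
```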
%% Cell type:markdown id: tags:
Prepare the newly injected and the previous constants from the local files produced in out_folder.
%% Cell type:code id: tags:
``` python
# TODO: After changes in the Cal DB interface read files from cal repository
# Load constants from local files
data = OrderedDict()
old_cons = OrderedDict()
mod_names = []
# Loop over modules
for i in range(nmods):
qm = module_index_to_qm(i)
if not mod_mapping.get(qm):
continue
mod_pdu = mod_mapping[qm]
# loop over constants
for const in ['Offset', 'Noise', 'ThresholdsDark', 'BadPixelsDark']:
# first load new constant
fpath = out_folder / f"const_{const}_{mod_pdu}.h5"
if not fpath.exists():
print(f"No local output file {fpath} found")
continue
with h5py.File(fpath, 'r') as f:
if qm not in data:
mod_names.append(qm)
data[qm] = OrderedDict()
data[qm][const] = f["data"][()]
# try finding old constants using paths from CalCat store
qm_mdata = old_constant_metadata[qm]
if const not in qm_mdata:
continue
filepath = qm_mdata[const]["filepath"]
h5path = qm_mdata[const]["h5path"]
if not filepath or not h5path:
continue
with h5py.File(filepath, "r") as fd:
old_cons.setdefault(qm, OrderedDict())[const] = fd[f"{h5path}/data"][:]
```
%% Cell type:code id: tags:
``` python
cons_shape = {}
# Extract each constant's shape from the first processed module.
for qm, constant in data.items():
    for cname, cons in constant.items():
        if cname not in cons_shape:
            cons_shape[cname] = cons.shape
    break
constants = {}
prev_const = {}
for cname, sh in cons_shape.items():
constants[cname] = np.zeros((len(mod_names),) + sh)
prev_const[cname] = np.zeros((len(mod_names),) + sh)
for i in range(len(mod_names)):
for cname, cval in constants.items():
cval[i] = data[mod_names[i]][cname]
if mod_names[i] in old_cons.keys():
prev_const[cname][i] = old_cons[mod_names[i]][cname]
else:
print(f"No previous {cname} found for {mod_names[i]}")
```
%% Cell type:code id: tags:
``` python
display(Markdown('## Processed modules'))
show_processed_modules(dinstance, constants, mod_names, mode="processed")
```
%% Cell type:markdown id: tags:
## Summary figures across Modules ##
The following plots give an overview of calibration constants averaged across pixels and memory cells. A bad pixel mask is applied.
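%% Cell type:markdown id: tags:
The masking convention used by these plots is simple: entries flagged in BadPixelsDark are set to nan first, and np.nanmean then averages over the remaining good entries. A minimal sketch with made-up shapes:
%% Cell type:code id: tags:
``` python
import numpy as np

# Illustrative constant with shape (modules, x, y, memory cells), matching the
# axis=(1, 2) pixel averaging used further below.
const = np.random.default_rng(1).normal(5000.0, 20.0, size=(2, 16, 16, 4))
bad = np.zeros(const.shape, dtype=np.uint32)
bad[0, 3, 5, :] = 1  # flag one pixel in all memory cells of the first module

masked = const.copy()
masked[bad > 0] = np.nan  # exclude flagged pixels from the statistics
per_cell_mean = np.nanmean(masked, axis=(1, 2))  # -> (modules, memory cells)
print(per_cell_mean.shape)  # (2, 4)
```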
%% Cell type:code id: tags:
``` python
if "LPD" in dinstance:
geom = extra_geom.LPD_1MGeometry.from_quad_positions(quad_pos=[(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)])
pixels_y = 256
pixels_x = 256
elif dinstance in ('AGIPD1M1', 'AGIPD1M2'):
geom = extra_geom.AGIPD_1MGeometry.from_quad_positions(quad_pos=[(-525, 625),
(-550, -10),
(520, -160),
(542.5, 475)])
pixels_y = 128
pixels_x = 512
elif dinstance == "AGIPD500K":
geom = extra_geom.AGIPD_500K2GGeometry.from_origin()
pixels_y = 128
pixels_x = 512
elif "DSSC" in dinstance:
pixels_y = 512
pixels_x = 128
quadpos = [(-130, 5), (-130, -125), (5, -125), (5, 5)]
extrageom_pth = os.path.dirname(extra_geom.__file__)
geom = extra_geom.DSSC_1MGeometry.from_h5_file_and_quad_positions(
f"{extrageom_pth}/tests/dssc_geo_june19.h5", positions=quadpos)
Mod_data = OrderedDict()
gainstages = 1
for const_name, const in constants.items():
if const_name == 'BadPixelsDark':
continue
# Check whether the constant carries a gain dimension (5 axes), e.g. AGIPD, LPD.
if len(const.shape) == 5:
gainstages = 3
else:
gainstages = 1
for dname in ['{}', 'd-{}', 'dpct-{}']:
Mod_data[dname.format(const_name)] = OrderedDict()
display(Markdown(f'##### {const_name}'))
print_once = True
for gain in range(gainstages):
if const_name == 'ThresholdsDark':
if gain > 1:
continue
glabel = threshold_names
else:
glabel = gain_names
for i in range(nmods):
qm = module_index_to_qm(i)
if qm in mod_names:
m_idx = mod_names.index(qm)
# Constants with 5 axes carry a gain dimension, e.g. AGIPD, LPD.
if len(const.shape) == 5:
values = np.nanmean(const[m_idx, :, :, :, gain], axis=2)
dval = np.nanmean(prev_const[const_name][m_idx, :, :, :, gain], axis=2)
else:
values = np.nanmean(const[m_idx, :, :, :], axis=2)
dval = np.nanmean(prev_const[const_name][m_idx, :, :, :], axis=2)
values[values == 0] = np.nan
dval[dval == 0] = np.nan
dval = values - dval
dval_pct = dval/values * 100
values = np.moveaxis(values, 0, -1).reshape(1, values.shape[1], values.shape[0])
dval = np.moveaxis(dval, 0, -1).reshape(1, dval.shape[1], dval.shape[0])
dval_pct = np.moveaxis(dval_pct, 0, -1).reshape(1, dval_pct.shape[1], dval_pct.shape[0])
else:
# If the module is not available, fill the arrays with nan.
values = np.full((1, pixels_x, pixels_y), np.nan, dtype=np.float64)
dval = values
dval_pct = dval
for k, v in {'{}': values, 'd-{}': dval , 'dpct-{}': dval_pct}.items():
try:
Mod_data[k.format(const_name)][gain_names[gain]] = \
np.concatenate((Mod_data[k.format(const_name)][gain_names[gain]],
v), axis=0)
except KeyError:
Mod_data[k.format(const_name)][gain_names[gain]] = v
if np.count_nonzero(dval) == 0 and print_once:
display(Markdown(f'New and previous {const_name} are the same, hence there is no difference.'))
print_once = False
# Plotting constant overall modules.
display(Markdown(f'###### {glabel[gain]} ######'))
gs = gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(24, 32))
axis = {"ax0": {"cname": "{}", "gs": gs[0, :], "shrink": 0.9, "pad": 0.05, "label": "ADUs", "title": '{}'},
        "ax1": {"cname": "d-{}", "gs": gs[1, 0], "shrink": 0.6, "pad": 0.1, "label": "ADUs", "title": 'Difference with previous {}'},
        "ax2": {"cname": "dpct-{}", "gs": gs[1, 1], "shrink": 0.6, "pad": 0.1, "label": "%", "title": 'Difference with previous {} %'}}
for ax, axv in axis.items():
# Add the min and max plot values for each axis.
vmin, vmax = get_range(Mod_data[axv["cname"].format(const_name)][gain_names[gain]], 2)
ax = fig.add_subplot(axv["gs"])
geom.plot_data_fast(Mod_data[axv["cname"].format(const_name)][gain_names[gain]],
vmin=vmin, vmax=vmax, ax=ax,
colorbar={'shrink': axv["shrink"],
'pad': axv["pad"]
}
)
colorbar = ax.images[0].colorbar
colorbar.set_label(axv["label"])
ax.set_title(axv["title"].format(f"{const_name} {glabel[gain]}"), fontsize=15)
ax.set_xlabel('Columns', fontsize=15)
ax.set_ylabel('Rows', fontsize=15)
plt.show()
```
%% Cell type:code id: tags:
``` python
# Loop over modules and constants
for const_name, const in constants.items():
display(Markdown(f'### Summary across Modules - {const_name}'))
for gain in range(gainstages):
if const_name == 'ThresholdsDark':
if gain == 2:
continue
glabel = threshold_names
else:
glabel = gain_names
if len(const.shape) == 5:
data = np.copy(const[:, :, :, :, gain])
else:
data = np.copy(const[:, :, :, :])
if const_name != 'BadPixelsDark':
if "BadPixelsDark" in constants.keys():
label = f'{const_name} value [ADU], good pixels only'
if len(const.shape) == 5:
data[constants['BadPixelsDark'][:, :, :, :, gain] > 0] = np.nan
else:
data[constants['BadPixelsDark'][:, :, :, :] > 0] = np.nan
else:
label = f'{const_name} value [ADU], good and bad pixels'
datamean = np.nanmean(data, axis=(1, 2))
fig = plt.figure(figsize=(15, 6), tight_layout={
'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})
ax = fig.add_subplot(121)
else:
label = 'Fraction of bad pixels'
data[data > 0] = 1.0
datamean = np.nanmean(data, axis=(1, 2))
datamean[datamean == 1.0] = np.nan
fig = plt.figure(figsize=(15, 6), tight_layout={
'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})
ax = fig.add_subplot(111)
d = []
for im, mod in enumerate(datamean):
d.append({'x': np.arange(mod.shape[0]),
'y': mod,
'drawstyle': 'steps-pre',
'label': mod_names[im],
})
_ = simplePlot(d, figsize=(10, 10), xrange=(-12, 510),
x_label='Memory Cell ID',
y_label=label,
use_axis=ax,
title='{}'.format(glabel[gain]),
title_position=[0.5, 1.18],
legend='outside-top-ncol6-frame', legend_size='18%',
legend_pad=0.00)
if const_name != 'BadPixelsDark':
ax = fig.add_subplot(122)
if "BadPixelsDark" in constants.keys():
label = f'$\sigma$ {const_name} [ADU], good pixels only'
else:
label = f'$\sigma$ {const_name} [ADU], good and bad pixels'
d = []
for im, mod in enumerate(np.nanstd(data, axis=(1, 2))):
d.append({'x': np.arange(mod.shape[0]),
'y': mod,
'drawstyle': 'steps-pre',
'label': mod_names[im],
})
_ = simplePlot(d, figsize=(10, 10), xrange=(-12, 510),
x_label='Memory Cell ID',
y_label=label,
use_axis=ax,
title='{} $\sigma$'.format(glabel[gain]),
title_position=[0.5, 1.18],
legend='outside-top-ncol6-frame', legend_size='18%',
legend_pad=0.00)
plt.show()
```
%% Cell type:markdown id: tags:
## Summary tables across Modules ##
Tables show values averaged across all pixels and memory cells of a given detector module.
%% Cell type:code id: tags:
``` python
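# Let raw LaTeX ($...$ math and \ commands) pass through tabulate's 'latex'
# formatter unescaped, so entries like $\pm$ in the tables below render as math.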
if '$' in tabulate.LATEX_ESCAPE_RULES:
    del tabulate.LATEX_ESCAPE_RULES['$']
if '\\' in tabulate.LATEX_ESCAPE_RULES:
    del tabulate.LATEX_ESCAPE_RULES['\\']
```
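%% Cell type:markdown id: tags:
A standalone sketch of the table-rendering pattern used below, with made-up numbers: tabulate emits a LaTeX tabular, which IPython's Latex display renders in the report.
%% Cell type:code id: tags:
``` python
import tabulate
from IPython.display import Latex, display

header = ['Module', 'High gain', 'Medium gain', 'Low gain']
table = [['Q1M1', '5302.3 $\\pm$ 42.1', '6403.8 $\\pm$ 51.0', '8302.9 $\\pm$ 60.7']]
display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))
```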
%% Cell type:code id: tags:
``` python
head = ['Module', 'High gain', 'Medium gain', 'Low gain']
head_th = ['Module', 'HG-MG threshold', 'MG-LG threshold']
for const_name, const in constants.items():
table = []
for i_mod, mod in enumerate(mod_names):
t_line = [mod]
for gain in range(gainstages):
if const_name == 'ThresholdsDark':
if gain == 2:
continue
header = head_th
else:
header = head
if len(const.shape) == 5:
data = np.copy(const[i_mod, :, :, :, gain])
else:
data = np.copy(const[i_mod, :, :, :])
if const_name == 'BadPixelsDark':
data[data > 0] = 1.0
datasum = np.nansum(data)
datamean = np.nanmean(data)
if datamean == 1.0:
datamean = np.nan
datasum = np.nan
t_line.append(f'{datasum:6.0f} ({datamean:6.3f}) ')
    label = '## Number (fraction) of bad pixels'
else:
    if "BadPixelsDark" in constants.keys():
        data[constants['BadPixelsDark']
             [i_mod, :, :, :, gain] > 0] = np.nan
        label = f'## Average {const_name} [ADU], good pixels only'
    else:
        label = f'## Average {const_name} [ADU], good and bad pixels'
    t_line.append(f'{np.nanmean(data):6.1f} $\\pm$ {np.nanstd(data):6.1f}')
table.append(t_line)
display(Markdown(label))
display(Latex(tabulate.tabulate(
    table, tablefmt='latex', headers=header)))
```
......
@@ -51,7 +51,7 @@ install_requires = [
     "dill==0.3.0",
     "docutils==0.17.1",
     "dynaconf==3.1.4",
-    "extra_data==1.8.0",
+    "extra_data==1.9.1",
     "extra_geom==1.6.0",
     "gitpython==3.1.0",
     "h5py==3.5.0",
@@ -78,6 +78,7 @@ install_requires = [
     "pasha==0.1.0",
     "prettytable==0.7.2",
     "princess==0.5",
+    "psutil==5.9.0",
     "pypandoc==1.4",
     "python-dateutil==2.8.1",
     "pyyaml==5.3",
......