%% Cell type:markdown id: tags:
# AGIPD Retrieving Constants Pre-correction #
Author: European XFEL Detector Group, Version: 1.0
Retrieving Required Constants for Offline Calibration of the AGIPD Detector
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/SPB/202030/p900119/raw"  # the folder to read data from, required
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/AGIPD_"  # the folder to output to, required
sequences = [-1]  # sequences to correct, set to -1 for all, range allowed
modules = [-1]  # modules to correct, set to -1 for all, range allowed
run = 80  # run to process, required
karabo_id = "SPB_DET_AGIPD1M-1"  # karabo id
karabo_da = ['-1']  # a list of data aggregator names, default ['-1'] selects all data aggregators
path_template = 'RAW-R{:04d}-{}-S{:05d}.h5'  # the template to use to access data
ctrl_source_template = '{}/MDL/FPGA_COMP_TEST'  # path to control information
instrument_source_template = '{}/DET/{}:xtdf'  # path in the HDF5 file to images
receiver_template = "{}CH0"  # inset for receiver devices
karabo_id_control = "SPB_IRU_AGIPD1M1"  # karabo id for the control device
use_dir_creation_date = True  # use the creation date of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8015#8045"  # the database interface to use
creation_date_offset = "00:00:00"  # add an offset to the creation date, e.g. to get different constants
slopes_ff_from_files = ""  # path to locally stored SlopesFF and BadPixelsFF constants
calfile = ""  # path to calibration file. Leave empty if all data should come from the DB
nodb = False  # if set, only file-based constants will be used
mem_cells = 0  # number of memory cells used, set to 0 to automatically infer
bias_voltage = 0  # bias voltage, set to 0 to use the value stored in slow data
acq_rate = 0.  # the detector acquisition rate, use 0 to try to auto-determine
gain_setting = -1  # the gain setting, use -1 to use the value stored in slow data
gain_mode = -1  # gain mode (0: adaptive, 1-3: fixed high/med/low, -1: read from CONTROL data)
photon_energy = 9.2  # photon energy in keV
integration_time = -1  # integration time, negative values for auto-detection

# Correction Booleans
only_offset = False  # apply only the offset correction; if False, offset is still applied together with the other enabled corrections
rel_gain = False  # do relative gain correction based on PC data
xray_gain = True  # do relative gain correction based on X-ray data
blc_noise = False  # if set, baseline correction via noise peak location is attempted
blc_stripes = False  # if set, baseline correction via stripes is attempted
blc_hmatch = False  # if set, baseline correction via histogram matching is attempted
match_asics = False  # if set, inner ASIC borders are matched to the same signal level
adjust_mg_baseline = False  # adjust the medium gain baseline to match the highest high gain value
```
%% Cell type:code id: tags:
``` python
# Fill dictionaries comprising bools and arguments for correction and data analysis
# Here the hierarchy and dependencies of the correction booleans are defined
corr_bools = {}
# Offset is at the bottom of the AGIPD correction pyramid.
corr_bools["only_offset"] = only_offset
# Don't apply any other corrections if only_offset is requested
if not only_offset:
    corr_bools["adjust_mg_baseline"] = adjust_mg_baseline
    corr_bools["rel_gain"] = rel_gain
    corr_bools["xray_corr"] = xray_gain
    corr_bools["blc_noise"] = blc_noise
    corr_bools["blc_hmatch"] = blc_hmatch
```
%% Cell type:code id: tags:
``` python
from pathlib import Path
from typing import List, Tuple

import matplotlib
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
from datetime import timedelta
from dateutil import parser
from extra_data import RunDirectory

matplotlib.use("agg")

from cal_tools import agipdlib, tools
from cal_tools.enums import AgipdGainMode
from iCalibrationDB import Conditions, Constants, Detectors
```
%% Cell type:code id: tags:
``` python
# slopes_ff_from_files left as str for now
in_folder = Path(in_folder)
out_folder = Path(out_folder)
metadata = tools.CalibrationMetadata(out_folder)
```
%% Cell type:code id: tags:
``` python
creation_time = None
if use_dir_creation_date:
    creation_time = tools.get_dir_creation_date(str(in_folder), run)
    offset = parser.parse(creation_date_offset)
    delta = timedelta(hours=offset.hour, minutes=offset.minute, seconds=offset.second)
    creation_time += delta
    print(f"Using {creation_time} as creation time")

if sequences[0] == -1:
    sequences = None

print(f"Outputting to {out_folder}")
out_folder.mkdir(parents=True, exist_ok=True)

melt_snow = False if corr_bools["only_offset"] else agipdlib.SnowResolution.NONE
```
%% Cell type:code id: tags:
``` python
ctrl_src = ctrl_source_template.format(karabo_id_control)

print(f"Detector in use is {karabo_id}")

# Extracting Instrument string
instrument = karabo_id.split("_")[0]
# Evaluate detector instance for mapping
if instrument == "SPB":
    nmods = 16
elif instrument == "MID":
    nmods = 16
elif instrument == "HED":
    nmods = 8
print(f"Instrument {instrument}")

if karabo_da[0] == '-1':
    if modules[0] == -1:
        modules = list(range(nmods))
    karabo_da = ["AGIPD{:02d}".format(i) for i in modules]
else:
    modules = [int(x[-2:]) for x in karabo_da]
```
%% Cell type:code id: tags:
``` python
run_dc = RunDirectory(in_folder / f"r{run:04d}")
# set everything up filewise
mapped_files, _, _, _, _ = tools.map_modules_from_folder(
    str(in_folder), run, path_template, karabo_da, sequences=[0]
)
```
%% Cell type:code id: tags:
``` python
# Read AGIPD conditions from the first sequence of the first module and the slow data.
instrument_src = instrument_source_template.format(karabo_id, receiver_template)
instrument_src_mod = instrument_src.format(0)
agipd_cond = agipdlib.AgipdCtrl(
    run_dc=run_dc,
    image_src=None,  # not needed, since we don't read mem_cells or acq_rate here
    ctrl_src=ctrl_src,
)
if gain_setting == -1:
    gain_setting = agipd_cond.get_gain_setting(creation_time)
if bias_voltage == 0.:
    bias_voltage = agipd_cond.get_bias_voltage(karabo_id_control)
if integration_time == -1:
    integration_time = agipd_cond.get_integration_time()
if gain_mode == -1:
    gain_mode = agipd_cond.get_gain_mode()
else:
    gain_mode = AgipdGainMode(gain_mode)
```
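%% Cell type:markdown id: tags:
The same `AgipdCtrl` helper is pointed at a module's image source whenever the memory cells or acquisition rate must be read from the fast data, as `retrieve_constants` does below. A minimal sketch, using module 0 and the names defined above:
%% Cell type:code id: tags:
``` python
# Sketch only: read per-module conditions by assigning an image source.
# Module 0 is an arbitrary example; retrieve_constants() does this per module index.
agipd_cond.image_src = instrument_src.format(0)
print("Memory cells:", agipd_cond.get_num_cells())
print("Acquisition rate:", agipd_cond.get_acq_rate())
```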
%% Cell type:markdown id: tags:
## Retrieve Constants ##
%% Cell type:code id: tags:
``` python
def retrieve_constants(
    karabo_da: str, idx: int
) -> Tuple[dict, str, float, int]:
    """
    Retrieve constants for a module.

    :return:
        mdata_dict: dictionary with the metadata for the retrieved constants.
        karabo_da: karabo data aggregator.
        acq_rate: acquisition rate parameter.
        mem_cells: number of memory cells.
    """
    if mem_cells == 0:
        # either look up the number of memory cells in the run data
        agipd_cond.image_src = instrument_src.format(idx)
        local_mem_cells = agipd_cond.get_num_cells()
    else:
        # or use the overriding notebook parameter
        local_mem_cells = mem_cells

    # maybe we never found this in a sequence file...
    if local_mem_cells is None:
        raise ValueError(
            "No raw images found for "
            f"{tools.module_index_to_qm(idx)}({karabo_da}) for all sequences")

    if acq_rate == 0.:
        local_acq_rate = agipd_cond.get_acq_rate()
    else:
        local_acq_rate = acq_rate

    # avoid retrieving constants, if requested.
    if nodb_with_dark:
        return

    const_dict = agipdlib.assemble_constant_dict(
        corr_bools,
        pc_bools,
        local_mem_cells,
        bias_voltage,
        gain_setting,
        local_acq_rate,
        photon_energy,
        gain_mode=gain_mode,
        beam_energy=None,
        only_dark=only_dark,
        integration_time=integration_time
    )

    # Retrieve multiple constants through an input dictionary
    # to return a dict of useful metadata.
    mdata_dict = dict()
    mdata_dict["constants"] = dict()
    mdata_dict["physical-detector-unit"] = None  # initialization

    for const_name, (const_init_fun, const_shape, (cond_type, cond_param)) in const_dict.items():
        if gain_mode and const_name in ("ThresholdsDark",):
            continue
        # saving metadata in a dict
        const_mdata = dict()
        mdata_dict["constants"][const_name] = const_mdata

        if slopes_ff_from_files and const_name in ["SlopesFF", "BadPixelsFF"]:
            const_mdata["file-path"] = (
                f"{slopes_ff_from_files}/slopesff_bpmask_module_{tools.module_index_to_qm(idx)}.h5")
            const_mdata["creation-time"] = "00:00:00"
            continue

        if gain_mode and const_name in (
            "BadPixelsPC", "SlopesPC", "BadPixelsFF", "SlopesFF"
        ):
            param_copy = cond_param.copy()
            del param_copy["gain_mode"]
            condition = getattr(Conditions, cond_type).AGIPD(**param_copy)
        else:
            condition = getattr(Conditions, cond_type).AGIPD(**cond_param)

        _, mdata = tools.get_from_db(
            karabo_id,
            karabo_da,
            getattr(Constants.AGIPD, const_name)(),
            condition,
            getattr(np, const_init_fun)(const_shape),
            cal_db_interface,
            creation_time,
            meta_only=True,
            verbosity=0,
        )
        mdata_const = mdata.calibration_constant_version
        # check if the constant was successfully retrieved.
        if mdata.comm_db_success:
            const_mdata["file-path"] = (
                f"{mdata_const.hdf5path}" f"{mdata_const.filename}"
            )
            const_mdata["creation-time"] = f"{mdata_const.begin_at}"
            mdata_dict["physical-detector-unit"] = mdata_const.device_name
        else:
            const_mdata["file-path"] = const_dict[const_name][:2]
            const_mdata["creation-time"] = None

    return mdata_dict, karabo_da, local_acq_rate, local_mem_cells
```
%% Cell type:code id: tags:
``` python
# Constant paths & timestamps are saved under retrieved-constants in calibration_metadata.yml
retrieved_constants = metadata.setdefault("retrieved-constants", {})
```
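%% Cell type:markdown id: tags:
For orientation, the mapping stored under `retrieved-constants` holds one entry per data aggregator. A minimal sketch of its shape, with purely illustrative placeholder values:
%% Cell type:code id: tags:
``` python
# Illustrative only: the structure written below into calibration_metadata.yml.
# All names, paths and timestamps here are placeholders, not real values.
example_entry = {
    "AGIPD00": {
        "physical-detector-unit": "AGIPD_PDU_NAME",  # hypothetical PDU name
        "constants": {
            "Offset": {
                "file-path": "/gpfs/.../cal_store_file.h5",  # hdf5path + filename from the DB
                "creation-time": "2020-01-01 00:00:00",
            },
        },
    },
}
```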
%% Cell type:code id: tags:
``` python
pc_bools = [corr_bools.get("rel_gain"),
            corr_bools.get("adjust_mg_baseline"),
            corr_bools.get('blc_noise'),
            corr_bools.get('blc_hmatch'),
            corr_bools.get('blc_stripes'),
            melt_snow]

inp = []
only_dark = False
nodb_with_dark = False
if not nodb:
    only_dark = (calfile != "")
if calfile != "" and not corr_bools["only_offset"]:
    nodb_with_dark = nodb

da_to_qm = dict()
for module_index, k_da in zip(modules, karabo_da):
    da_to_qm[k_da] = tools.module_index_to_qm(module_index)
    if k_da in retrieved_constants:
        print(
            f"Constant for {k_da} already in calibration_metadata.yml, won't query again.")
        continue
    inp.append((k_da, module_index))
```
%% Cell type:code id: tags:
``` python
with multiprocessing.Pool(processes=nmods) as pool:
    results = pool.starmap(retrieve_constants, inp)
```
%% Cell type:code id: tags:
``` python
acq_rate_mods = []
mem_cells_mods = []
for md_dict, karabo_da, acq_rate, mem_cells in results:
    retrieved_constants[karabo_da] = md_dict
    mem_cells_mods.append(mem_cells)
    acq_rate_mods.append(acq_rate)

# Validate that mem_cells and acq_rate are the same for all modules.
# TODO: Should a warning be enough?
if len(set(mem_cells_mods)) != 1 or len(set(acq_rate_mods)) != 1:
    print(
        "WARNING: Number of memory cells or "
        "acquisition rate are not identical for all modules.\n"
        f"mem_cells: {mem_cells_mods}.\nacq_rate: {acq_rate_mods}.")

# check if it is requested not to retrieve any constants from the database
if nodb_with_dark:
    print("No constants were retrieved as calibrated files will be used.")
else:
    print("\nRetrieved constants for modules:",
          ', '.join([tools.module_index_to_qm(x) for x in modules]))
    print("Operating conditions are:")
    print(f"• Bias voltage: {bias_voltage}")
    print(f"• Memory cells: {mem_cells}")
    print(f"• Acquisition rate: {acq_rate}")
    print(f"• Gain mode: {gain_mode.name}")
    print(f"• Gain setting: {gain_setting}")
    print(f"• Integration time: {integration_time}")
    print(f"• Photon energy: {photon_energy}")
    print("Constant metadata is saved under \"retrieved-constants\" in calibration_metadata.yml\n")
```
%% Cell type:code id: tags:
``` python
print("Using constants with creation times:")
timestamps = {}

for k_da, module_name in da_to_qm.items():
    module_timestamps = timestamps[module_name] = {}
    module_constants = retrieved_constants[k_da]

    print(f"{module_name}:")
    for cname, mdata in module_constants["constants"].items():
        if hasattr(mdata["creation-time"], 'strftime'):
            mdata["creation-time"] = mdata["creation-time"].strftime('%y-%m-%d %H:%M')
        print(f'{cname:.<12s}', mdata["creation-time"])

    for cname in ['Offset', 'SlopesPC', 'SlopesFF']:
        if cname in module_constants["constants"]:
            module_timestamps[cname] = module_constants["constants"][cname]["creation-time"]
        else:
            module_timestamps[cname] = "NA"

time_summary = retrieved_constants.setdefault("time-summary", {})
time_summary["SAll"] = timestamps
metadata.save()
```
%% Cell type:markdown id: tags:
# Gain Characterization Summary #
%% Cell type:code id: tags:
``` python
in_folder = "" # in this notebook, in_folder is not used as the data source is in the destination folder in_folder = "" # in this notebook, in_folder is not used as the data source is in the destination folder
out_folder = "" # the folder to output to, required out_folder = "" # the folder to output to, required
hist_file_template = "hists_m{:02d}_sum.h5" hist_file_template = "hists_m{:02d}_sum.h5"
proc_folder = "" # Path to corrected image data used to create histograms and validation plots proc_folder = "" # Path to corrected image data used to create histograms and validation plots
raw_folder = "/gpfs/exfel/exp/MID/202030/p900137/raw" # folder of raw data. This is used to save information of source data of generated constants, required raw_folder = "/gpfs/exfel/exp/MID/202030/p900137/raw" # folder of raw data. This is used to save information of source data of generated constants, required
run = 449 # runs of image data used to create histograms run = 449 # runs of image data used to create histograms
karabo_id = "MID_DET_AGIPD1M-1" # karabo karabo_id karabo_id = "MID_DET_AGIPD1M-1" # karabo karabo_id
receiver_id = "{}CH0" # inset for receiver devices ctrl_source_template = '{}/MDL/FPGA_COMP' # path to control information
path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data karabo_id_control = "MID_EXP_AGIPD1M1" # karabo-id for control device
h5path = 'INSTRUMENT/{}/DET/{}:xtdf/' # path in the HDF5 file to images
h5path_idx = 'INDEX/{}/DET/{}:xtdf/' # path in the HDF5 file to images
h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP' # path to control information
karabo_id_control = "MID_IRU_AGIPD1M1" # karabo-id for control device
karabo_da_control = 'AGIPD1MCTRL00' # karabo DA for control infromation
use_dir_creation_date = True # use the creation data of the input dir for database queries use_dir_creation_date = True # use the creation data of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use
cal_db_timeout = 30000 # in milli seconds cal_db_timeout = 30000 # in milli seconds
local_output = True # output constants locally local_output = True # output constants locally
db_output = False # output constants to database db_output = False # output constants to database
# Fit parameters # Fit parameters
peak_range = [-30,30,35,65,80,130,145,200] # where to look for the peaks, [a0, b0, a1, b1, ...] exactly 8 elements peak_range = [-30,30,35,65,80,130,145,200] # where to look for the peaks, [a0, b0, a1, b1, ...] exactly 8 elements
peak_width_range = [0, 30, 0, 35, 0, 40, 0, 45] # fit limits on the peak widths, [a0, b0, a1, b1, ...] exactly 8 elements peak_width_range = [0, 30, 0, 35, 0, 40, 0, 45] # fit limits on the peak widths, [a0, b0, a1, b1, ...] exactly 8 elements
# Bad-pixel thresholds # Bad-pixel thresholds
d0_lim = [10, 70] # hard limits for d0 value (distance between noise and first peak) d0_lim = [10, 70] # hard limits for d0 value (distance between noise and first peak)
peak_width_lim = [0.97, 1.43, 1.03, 1.57] # hard limits on the peak widths, [a0, b0, a1, b1, ...] in units of the noise peak. 4 parameters. peak_width_lim = [0.97, 1.43, 1.03, 1.57] # hard limits on the peak widths, [a0, b0, a1, b1, ...] in units of the noise peak. 4 parameters.
chi2_lim = [0,3.0] # Hard limit on chi2/nDOF value chi2_lim = [0,3.0] # Hard limit on chi2/nDOF value
gain_lim = [0.80, 1.2] # Threshold on gain in relative number. Contribute to BadPixel bit "Gain_deviation" gain_lim = [0.80, 1.2] # Threshold on gain in relative number. Contribute to BadPixel bit "Gain_deviation"
cell_range = [1,5] # range of cell to be considered, [0,0] for all cell_range = [1,5] # range of cell to be considered, [0,0] for all
pixel_range = [0,0,512,128] # range of pixels x1,y1,x2,y2 to consider [0,0,512,128] for all pixel_range = [0,0,512,128] # range of pixels x1,y1,x2,y2 to consider [0,0,512,128] for all
max_bins = 250 # Maximum number of bins to consider max_bins = 250 # Maximum number of bins to consider
batch_size = [1,8,8] # batch size: [cell,x,y] batch_size = [1,8,8] # batch size: [cell,x,y]
n_peaks_fit = 4 # Number of gaussian peaks to fit including noise peak n_peaks_fit = 4 # Number of gaussian peaks to fit including noise peak
fix_peaks = True # Fix distance between photon peaks fix_peaks = True # Fix distance between photon peaks
# Detector conditions # Detector conditions
max_cells = 0 # number of memory cells used, set to 0 to automatically infer max_cells = 0 # number of memory cells used, set to 0 to automatically infer
bias_voltage = 300 # Bias voltage bias_voltage = 0. # Bias voltage
acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine
gain_setting = 0.1 # the gain setting, use 0.1 to try to auto-determine gain_setting = -1 # the gain setting, use 0.1 to try to auto-determine
photon_energy = 8.05 # photon energy in keV photon_energy = 8.05 # photon energy in keV
integration_time = -1 # integration time, negative values for auto-detection. integration_time = -1 # integration time, negative values for auto-detection.
``` ```
%% Cell type:code id: tags:
``` python
import glob
import os
import re
import traceback
import warnings
from multiprocessing import Pool

import h5py
import matplotlib.pyplot as plt
import numpy as np
import tabulate
from cal_tools.agipdlib import AgipdCtrl
from cal_tools.agipdutils_ff import (
    BadPixelsFF,
    any_in,
    fit_n_peaks,
    gaussian_sum,
    get_starting_parameters,
)
from cal_tools.ana_tools import get_range, save_dict_to_hdf5
from cal_tools.enums import BadPixels
from cal_tools.tools import (
    get_dir_creation_date,
    get_pdu_from_db,
    get_report,
    module_index_to_qm,
    send_to_db
)
from dateutil import parser
from extra_data import H5File, RunDirectory, stack_detector_data
from extra_geom import AGIPD_1MGeometry, AGIPD_500K2GGeometry
from iCalibrationDB import Conditions, Constants, Detectors
from iminuit import Minuit
from IPython.display import HTML, Latex, Markdown, display
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.simpleplot import simplePlot

%matplotlib inline
warnings.filterwarnings('ignore')
```
%% Cell type:code id: tags:
``` python
peak_range = np.reshape(peak_range, (4, 2))
```
%% Cell type:code id: tags:
``` python
# Get operation conditions
ctrl_source = ctrl_source_template.format(karabo_id_control)

raw_dc = RunDirectory(f'{raw_folder}/r{run:04d}/')

# Read operating conditions from the AGIPD00 files
instrument_src_mod = [
    s for s in list(raw_dc.all_sources) if "0CH" in s][0]
ctrl_src = [
    s for s in list(raw_dc.all_sources) if ctrl_source in s][0]

# Evaluate creation time
creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(raw_folder, run)

agipd_cond = AgipdCtrl(
    run_dc=raw_dc,
    image_src=instrument_src_mod,
    ctrl_src=ctrl_src,
    raise_error=False,  # to be able to process very old data where some settings are missing
)

mem_cells = agipd_cond.get_num_cells()
if mem_cells is None:
    raise ValueError(f"No raw images found in {raw_dc[instrument_src_mod].files}")
if acq_rate == 0.:
    acq_rate = agipd_cond.get_acq_rate()
if gain_setting == -1:
    gain_setting = agipd_cond.get_gain_setting(creation_time)
if bias_voltage == 0.:
    bias_voltage = agipd_cond.get_bias_voltage(karabo_id_control)
if integration_time == -1:
    integration_time = agipd_cond.get_integration_time()

# Evaluate detector instance for mapping
instrument = karabo_id.split("_")[0]
if instrument == "HED":
    nmods = 8
else:
    nmods = 16

print(f"Using {creation_time} as creation time")
print(f"Operating conditions are:\n• Bias voltage: {bias_voltage}\n• Memory cells: {mem_cells}\n"
      f"• Acquisition rate: {acq_rate}\n• Gain setting: {gain_setting}\n• Integration time: {integration_time}\n"
      f"• Photon energy: {photon_energy}\n")
```
%% Cell type:code id: tags:
``` python
# Load constants for all modules
keys = ['g0mean', 'g1mean', 'gain', 'chi2_ndof', 'mask']
all_keys = set(keys)
for i in range(n_peaks_fit):
    all_keys.add(f'g{i}mean')
    all_keys.add(f'g{i}sigma')

fit_data = {}
labels = {'g0mean': 'Noise peak position [ADU]',
          'g1mean': 'First photon peak [ADU]',
          'gain': f"Gain [ADU/keV], $\gamma$={photon_energy} [keV]",
          'chi2_ndof': '$\chi^2$/nDOF',
          'mask': 'Fraction of bad pixels over cells'}

modules = []
karabo_da = []
for mod in range(nmods):
    qm = module_index_to_qm(mod)
    fit_data[mod] = {}
    try:
        hf = h5py.File(f'{out_folder}/fits_m{mod:02d}.h5', 'r')
        shape = hf['data/g0mean'].shape
        for key in keys:
            fit_data[mod][key] = hf[f'data/{key}'][()]
        print(f"{in_folder}/{hist_file_template.format(mod)}")
        modules.append(mod)
        karabo_da.append(f"AGIPD{mod:02d}")
    except Exception as e:
        err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
        print(f"No fit data available for module {qm}")
```
%% Cell type:code id: tags:
``` python
# Calculate SlopesFF and BadPixels to be sent to the DB
bpmask = {}
slopesFF = {}

for mod in modules:
    bpmask[mod] = np.zeros(fit_data[mod]['mask'].shape).astype(np.int32)
    bpmask[mod][any_in(fit_data[mod]['mask'], BadPixelsFF.NO_ENTRY.value)] = BadPixels.FF_NO_ENTRIES.value
    bpmask[mod][any_in(fit_data[mod]['mask'],
                       BadPixelsFF.GAIN_DEVIATION.value)] |= BadPixels.FF_GAIN_DEVIATION.value
    bpmask[mod][any_in(fit_data[mod]['mask'],
                       BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
                       BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |
                       BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value)] |= BadPixels.FF_GAIN_EVAL_ERROR.value

    # Set the value for bad pixels to the average across pixels for a given module
    slopesFF[mod] = np.copy(fit_data[mod]['gain'])
    slopesFF[mod][fit_data[mod]['mask'] > 0] = np.nan
    gain_mean = np.nanmean(slopesFF[mod], axis=(1, 2))
    for i in range(slopesFF[mod].shape[0]):
        slopesFF[mod][i][fit_data[mod]['mask'][i] > 0] = gain_mean[i]
```
%% Cell type:code id: tags:
``` python
# Read the report path and create the file location tuple to add with the injection
proposal = list(filter(None, raw_folder.strip('/').split('/')))[-2]
file_loc = f'Proposal: {proposal}, Run: {run}'
report = get_report(out_folder)
```
%% Cell type:code id: tags:
``` python
# set the operating condition
condition = Conditions.Illuminated.AGIPD(mem_cells, bias_voltage, 9.2,
                                         pixels_x=512, pixels_y=128, beam_energy=None,
                                         acquisition_rate=acq_rate, gain_setting=gain_setting,
                                         integration_time=integration_time)

# Modify acceptable deviations for the integration time condition if and only if
# the integration time is not using the standard value (12).
if integration_time != 12:
    for p in condition.parameters:
        if p.name == 'Integration Time':
            p.lower_deviation = 5
            p.upper_deviation = 5

# Retrieve a list of all modules corresponding to the processed karabo_das
db_modules = get_pdu_from_db(karabo_id, karabo_da, Constants.AGIPD.SlopesFF(),
                             condition, cal_db_interface,
                             snapshot_at=creation_time)
```
%% Cell type:code id: tags:
``` python
# Send constants to the DB
def send_const(mod, pdu):
    try:
        # gain
        constant = Constants.AGIPD.SlopesFF()
        constant.data = np.moveaxis(np.moveaxis(slopesFF[mod], 0, 2), 0, 1)
        send_to_db(
            pdu, karabo_id, constant, condition, file_loc,
            report, cal_db_interface, creation_time,
            timeout=cal_db_timeout,
        )
        # bad pixels
        constant_bp = Constants.AGIPD.BadPixelsFF()
        constant_bp.data = np.moveaxis(np.moveaxis(bpmask[mod], 0, 2), 0, 1)
        send_to_db(
            pdu, karabo_id, constant_bp, condition, file_loc,
            report, cal_db_interface, creation_time,
            timeout=cal_db_timeout,
        )
    except Exception as e:
        err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
        when = None

# Check if we have the shape we expect
if db_output:
    if slopesFF[modules[0]].shape == (mem_cells, 512, 128):
        with Pool(processes=len(modules)) as pool:
            const_out = pool.starmap(send_const, zip(modules, db_modules))
    else:
        print("Constants are not sent to the DB because of a shape mismatch")
        print(f"Expected {(mem_cells, 512, 128)}, observed {slopesFF[modules[0]].shape}")

condition_dict = {}
for entry in condition.to_dict()['parameters']:
    key = entry.pop('parameter_name')
    del entry['description']
    del entry['flg_available']
    condition_dict[key] = entry

# Create the same file structure as database constants files, in which
# each constant type has its corresponding condition and data.
if local_output:
    for mod, pdu in zip(modules, db_modules):
        qm = module_index_to_qm(mod)
        file = f"{out_folder}/slopesff_bpmask_module_{qm}.h5"
        dic = {
            pdu: {
                'SlopesFF': {
                    0: {
                        'condition': condition_dict,
                        'data': np.moveaxis(np.moveaxis(slopesFF[mod], 0, 2), 0, 1)}
                },
                'BadPixelsFF': {
                    0: {
                        'condition': condition_dict,
                        'data': np.moveaxis(np.moveaxis(bpmask[mod], 0, 2), 0, 1)}
                },
            }
        }
        save_dict_to_hdf5(dic, file)
```
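%% Cell type:markdown id: tags:
For reference, a minimal sketch of reading one of these locally written files back, assuming `save_dict_to_hdf5` mirrors the nested dictionary as HDF5 groups and datasets (module 0 as a hypothetical example):
%% Cell type:code id: tags:
``` python
# Sketch only: inspect a locally saved constants file written above.
fname = f"{out_folder}/slopesff_bpmask_module_{module_index_to_qm(0)}.h5"
with h5py.File(fname, "r") as f:
    pdu_name = list(f.keys())[0]  # physical detector unit name
    slopes = f[f"{pdu_name}/SlopesFF/0/data"][()]
    badpix = f[f"{pdu_name}/BadPixelsFF/0/data"][()]
    print(pdu_name, slopes.shape, badpix.shape)
```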
%% Cell type:code id: tags:
``` python
# Define the AGIPD geometry
# TODO: find a better way to do this?
if instrument == "HED":
    geom = AGIPD_500K2GGeometry.from_origin()
else:
    geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
        (-525, 625),
        (-550, -10),
        (520, -160),
        (542.5, 475),
    ])
```
%% Cell type:code id: tags:
``` python
# Create the arrays that will be used for figures.
# A dictionary contains all the data for each of the processing stages (gains, mean, slopesFF...).
# Each array corresponds to the data for all processed modules.
# These are updated with their fit/slopes data in the following loops.
if cell_range == [0, 0]:
    cell_range[1] = shape[0]

const_data = {}
for key in keys:
    const_data[key] = np.full((nmods, shape[0], 512, 128), np.nan)
    for i in range(nmods):
        if key in fit_data[i]:
            const_data[key][i, :, pixel_range[0]:pixel_range[2],
                            pixel_range[1]:pixel_range[3]] = fit_data[i][key]

const_data['slopesFF'] = np.full((nmods, shape[0], 512, 128), np.nan)
labels['slopesFF'] = f'slopesFF [ADU/keV], $\gamma$={photon_energy} [keV]'
for i in range(nmods):
    if i in slopesFF:
        const_data['slopesFF'][i, :, pixel_range[0]:pixel_range[2],
                               pixel_range[1]:pixel_range[3]] = slopesFF[i]
```
%% Cell type:markdown id: tags:
## Summary across pixels ##
%% Cell type:code id: tags:
``` python
for key in const_data.keys():
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    if key == 'mask':
        data = np.nanmean(const_data[key] > 0, axis=1)
        vmin, vmax = (0, 1)
    else:
        data = np.nanmean(const_data[key], axis=1)
        vmin, vmax = get_range(data, 5)
    ax = geom.plot_data_fast(data, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
    _ = ax.set_title(labels[key])
```
%% Cell type:markdown id: tags:
## Summary histograms ##
%% Cell type:code id: tags:
``` python
sel = (const_data['mask'] == 0)

module_mean = np.nanmean(const_data["gain"], axis=(1, 2, 3))
module_mean = module_mean[:, np.newaxis, np.newaxis, np.newaxis]

dsets = {'d01 [ADU]': const_data["g1mean"] - const_data["g0mean"],
         'gain [ADU/keV]': const_data["gain"],
         'gain relative to module mean': const_data["gain"] / module_mean,
         }
fig = plt.figure(figsize=(16, 5))
for i, (par, data) in enumerate(dsets.items()):
    ax = fig.add_subplot(1, 3, i+1)
    plt_range = np.nanmin(data), np.nanmax(data)
    if 'd01' in par:
        ax.axvline(d0_lim[0])
        ax.axvline(d0_lim[1])
    elif 'rel' in par:
        ax.axvline(gain_lim[0])
        ax.axvline(gain_lim[1])
    num_bins = 100
    _ = ax.hist(data.flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='red',
                label='all fits',)
    a = ax.hist(data[sel].flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='g',
                label='good fits only',
                )
    ax.set_xlabel(f"{par}")
    ax.legend()
```
%% Cell type:markdown id: tags:
## Summary across cells ##
Good pixels only.
%% Cell type:code id: tags:
``` python
for key in const_data.keys():
    data = np.copy(const_data[key])
    if key == 'mask':
        data = data > 0
    else:
        data[const_data['mask'] > 0] = np.nan

    d = []
    for i in range(nmods):
        d.append({'x': np.arange(data[i].shape[0]),
                  'y': np.nanmean(data[i], axis=(1, 2)),
                  'drawstyle': 'steps-pre',
                  'label': f'{i}',
                  'linewidth': 2,
                  'linestyle': '--' if i > 7 else '-'
                  })

    fig = plt.figure(figsize=(15, 6))
    ax = fig.add_subplot(111)

    _ = simplePlot(d, xrange=(-12, 510),
                   x_label='Memory Cell ID',
                   y_label=labels[key],
                   use_axis=ax,
                   legend='top-left-frame-ncol8',)
    ylim = ax.get_ylim()
    ax.set_ylim(ylim[0], ylim[1] + np.abs(ylim[1] - ylim[0]) * 0.2)
    ax.grid()
```
%% Cell type:markdown id: tags:
## Summary table ##
%% Cell type:code id: tags:
``` python
table = []
for i in modules:
    table.append((i,
                  f"{np.nanmean(slopesFF[i]):0.1f} +- {np.nanstd(slopesFF[i]):0.2f}",
                  f"{np.nanmean(bpmask[i] > 0) * 100:0.1f} ({np.nansum(bpmask[i] > 0)})"
                  ))

all_SFF = np.array([list(sff) for sff in slopesFF.values()])
all_MSK = np.array([list(msk) for msk in bpmask.values()])
table.append(('overall',
              f"{np.nanmean(all_SFF):0.1f} +- {np.nanstd(all_SFF):0.2f}",
              f"{np.nanmean(all_MSK > 0) * 100:0.1f} ({np.nansum(all_MSK > 0)})"
              ))

md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
                                     headers=["Module", "Gain [ADU/keV]", "Bad pixels [%(Count)]"])))
```
%% Cell type:markdown id: tags:
## Performance plots
%% Cell type:code id: tags:
``` python
def get_trains_data(run_folder, source, include, tid=None):
    """
    Load a single train for all modules.

    :param run_folder: Path to the folder with data
    :param source: Data source to be loaded
    :param include: Inset of the file name to be considered
    :param tid: Train ID to be loaded. The first train is considered if None is given
    """
    run_data = RunDirectory(run_folder, include)
    if tid:
        tid, data = run_data.select('*/DET/*', source).train_from_id(tid)
        return tid, stack_detector_data(data, source, modules=nmods)
    else:
        for tid, data in run_data.select('*/DET/*', source).trains(require_all=True):
            return tid, stack_detector_data(data, source, modules=nmods)
    return None, None

include = '*S00000*'
tid, orig = get_trains_data(f'{proc_folder}/r{run:04d}/', 'image.data', include)
orig = orig[cell_range[0]:cell_range[1], ...]
```
%% Cell type:code id: tags:
``` python
# FIXME: mask bad pixels from median
# mask = const_data['BadPixelsFF']
corrections = const_data['slopesFF']  # (16, shape[0], 512, 128), shape[0] = cell_range[1] - cell_range[0]
corrections = np.moveaxis(corrections, 1, 0)  # (shape[0], 16, 512, 128)
rel_corr = corrections / np.nanmedian(corrections)
corrected = orig / rel_corr
```
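%% Cell type:markdown id: tags:
In the notation of the cell above, each pixel value is corrected as $\mathrm{corrected} = \mathrm{orig} / \big(g / \mathrm{median}(g)\big)$, where $g$ is the per-pixel SlopesFF gain; normalising by the global median flattens pixel-to-pixel gain variations while preserving the overall ADU scale.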
%% Cell type:markdown id: tags:
### Mean value not corrected (train 0)
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
odata = np.nanmean(orig, axis=0)
vmin, vmax = get_range(odata, 5)
ax = geom.plot_data_fast(odata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
_ = ax.set_title("Original data, mean across one train")
```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
### Mean value corrected (train 0) ### Mean value corrected (train 0)
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
fig = plt.figure(figsize=(20,20)) fig = plt.figure(figsize=(20,20))
ax = fig.add_subplot(111) ax = fig.add_subplot(111)
cdata = np.nanmean(corrected, axis=0) cdata = np.nanmean(corrected, axis=0)
ax = geom.plot_data_fast(cdata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20,20)) ax = geom.plot_data_fast(cdata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20,20))
_ = ax.set_title("Corrected data, mean across one train") _ = ax.set_title("Corrected data, mean across one train")
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
### Laplacian of the mean image ### Laplacian of the mean image
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from scipy.ndimage import laplace from scipy.ndimage import laplace
cmax = np.max(cdata) cmax = np.max(cdata)
omax = np.max(odata) omax = np.max(odata)
clap = np.zeros_like(cdata) clap = np.zeros_like(cdata)
olap = np.zeros_like(odata) olap = np.zeros_like(odata)
for i in range(nmods): for i in range(nmods):
clap[i] = np.abs(laplace(cdata[i].astype(float)/cmax)) clap[i] = np.abs(laplace(cdata[i].astype(float)/cmax))
olap[i] = np.abs(laplace(odata[i].astype(float)/omax)) olap[i] = np.abs(laplace(odata[i].astype(float)/omax))
fig = plt.figure(figsize=(20,10)) fig = plt.figure(figsize=(20,10))
vmin, vmax = get_range(olap, 2) vmin, vmax = get_range(olap, 2)
ax = fig.add_subplot(121) ax = fig.add_subplot(121)
ax = geom.plot_data_fast(olap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, ) ax = geom.plot_data_fast(olap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, )
_ = ax.set_title("Laplace (original data)") _ = ax.set_title("Laplace (original data)")
ax = fig.add_subplot(122) ax = fig.add_subplot(122)
ax = geom.plot_data_fast(clap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, ) ax = geom.plot_data_fast(clap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, )
_ = ax.set_title("Laplace (gain corrected data)") _ = ax.set_title("Laplace (gain corrected data)")
``` ```
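The `laplace` filter used here is the discrete Laplacian from `scipy.ndimage`, which responds to sharp pixel-to-pixel structure; its average magnitude therefore serves as a figure of merit for residual gain artefacts (the summary table below uses the median of the same quantity over real, noisy images). A minimal illustration on synthetic images, with invented shapes and values:
``` python
import numpy as np
from scipy.ndimage import laplace

flat = np.full((64, 64), 100.0)
stepped = flat.copy()
stepped[:, 32:] *= 1.1  # an ASIC-like gain step between two halves

# A flat image has zero Laplacian; the gain step produces a clear response.
print(np.abs(laplace(flat / flat.max())).mean())        # 0.0
print(np.abs(laplace(stepped / stepped.max())).mean())  # > 0, localised at the step
```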
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
### Histogram of corrected and uncorrected spectrum (train 0) ### Histogram of corrected and uncorrected spectrum (train 0)
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
###################################### ######################################
# FIT PEAKS # FIT PEAKS
###################################### ######################################
x_range = [peak_range[0][0], peak_range[-1][-1]] x_range = [peak_range[0][0], peak_range[-1][-1]]
nb = x_range[1] - x_range[0]+1 nb = x_range[1] - x_range[0]+1
sel = ~np.isnan(corrected) sel = ~np.isnan(corrected)
fig = plt.figure(figsize=(10, 5)) fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111) ax = fig.add_subplot(111)
y,xe, _ = ax.hist(corrected[sel].flatten(), bins=nb, range=x_range, label='corrected', alpha=0.5) y,xe, _ = ax.hist(corrected[sel].flatten(), bins=nb, range=x_range, label='corrected', alpha=0.5)
# get the bin centers from the bin edges # get the bin centers from the bin edges
xc=xe[:-1]+(xe[1]-xe[0])/2 xc=xe[:-1]+(xe[1]-xe[0])/2
pars, _ = get_starting_parameters(xc, y, peak_range,4) pars, _ = get_starting_parameters(xc, y, peak_range,4)
minuit = fit_n_peaks(xc, y, pars, x_range,fix_d01=False,sigma_limit=1) minuit = fit_n_peaks(xc, y, pars, x_range,fix_d01=False,sigma_limit=1)
pc = minuit.args pc = minuit.args
resc=minuit.fitarg resc=minuit.fitarg
yfc = gaussian_sum(xc,4, *pc) yfc = gaussian_sum(xc,4, *pc)
plt.plot(xc, yfc, label='corrected fit') plt.plot(xc, yfc, label='corrected fit')
y,_, _ = ax.hist(orig[sel].flatten(), bins=nb, range=x_range, label='original',alpha=0.5) y,_, _ = ax.hist(orig[sel].flatten(), bins=nb, range=x_range, label='original',alpha=0.5)
pars, _ = get_starting_parameters(xc, y, peak_range,4) pars, _ = get_starting_parameters(xc, y, peak_range,4)
minuit = fit_n_peaks(xc, y, pars, x_range,fix_d01=False,sigma_limit=1) minuit = fit_n_peaks(xc, y, pars, x_range,fix_d01=False,sigma_limit=1)
po = minuit.args po = minuit.args
reso=minuit.fitarg reso=minuit.fitarg
yfo = gaussian_sum(xc,4, *po) yfo = gaussian_sum(xc,4, *po)
plt.plot(xc, yfo, label='original fit') plt.plot(xc, yfo, label='original fit')
plt.title(f"Signal spectrum, first train") plt.title(f"Signal spectrum, first train")
plt.xlabel('[ADU]') plt.xlabel('[ADU]')
plt.legend() plt.legend()
plt.show() plt.show()
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
### Summary table ### Summary table
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from scipy.stats import median_absolute_deviation as mad from scipy.stats import median_absolute_deviation as mad
table = [] table = []
headers = ["Parameter", headers = ["Parameter",
"Value (original data)", "Value (original data)",
"Value (gain corrected data)", "Value (gain corrected data)",
"Relative difference"] "Relative difference"]
for i in range(4): for i in range(4):
table.append((f"Sigma{i} (ADU)", table.append((f"Sigma{i} (ADU)",
f"{reso[f'g{i}sigma']:0.2f} ", f"{reso[f'g{i}sigma']:0.2f} ",
f"{resc[f'g{i}sigma']:0.2f} ", f"{resc[f'g{i}sigma']:0.2f} ",
f"{(reso[f'g{i}sigma']-resc[f'g{i}sigma'])/reso[f'g{i}sigma']:0.2f} ", f"{(reso[f'g{i}sigma']-resc[f'g{i}sigma'])/reso[f'g{i}sigma']:0.2f} ",
)) ))
ovar = np.std(odata) ovar = np.std(odata)
cvar = np.std(cdata) cvar = np.std(cdata)
table.append((f"RMS of mean image", table.append((f"RMS of mean image",
f"{ovar:0.3f} ", f"{ovar:0.3f} ",
f"{cvar:0.3f} ", f"{cvar:0.3f} ",
f"{(ovar-cvar)/ovar:0.3f} ", f"{(ovar-cvar)/ovar:0.3f} ",
)) ))
omin, omax = get_range(odata, 5) omin, omax = get_range(odata, 5)
cmin, cmax = get_range(cdata, 5) cmin, cmax = get_range(cdata, 5)
ovar = np.std(odata[(odata > omin) & (odata<omax)]) ovar = np.std(odata[(odata > omin) & (odata<omax)])
cvar = np.std(cdata[(cdata > cmin) & (cdata<cmax)]) cvar = np.std(cdata[(cdata > cmin) & (cdata<cmax)])
table.append((f"RMS of mean image (mu+-5sigma)", table.append((f"RMS of mean image (mu+-5sigma)",
f"{ovar:0.3f} ", f"{ovar:0.3f} ",
f"{cvar:0.3f} ", f"{cvar:0.3f} ",
f"{(ovar-cvar)/ovar:0.3f} ", f"{(ovar-cvar)/ovar:0.3f} ",
)) ))
ovar = mad(odata.flatten()) ovar = mad(odata.flatten())
cvar = mad(cdata.flatten()) cvar = mad(cdata.flatten())
table.append((f"MAD of mean image", table.append((f"MAD of mean image",
f"{ovar:0.3f} ", f"{ovar:0.3f} ",
f"{cvar:0.3f} ", f"{cvar:0.3f} ",
f"{(ovar-cvar)/ovar:0.3f} ", f"{(ovar-cvar)/ovar:0.3f} ",
)) ))
ovar = np.median(olap) ovar = np.median(olap)
cvar = np.median(clap) cvar = np.median(clap)
table.append((f"Median Laplace", table.append((f"Median Laplace",
f"{ovar:0.3f} ", f"{ovar:0.3f} ",
f"{cvar:0.3f} ", f"{cvar:0.3f} ",
f"{(ovar-cvar)/ovar:0.3f} ", f"{(ovar-cvar)/ovar:0.3f} ",
)) ))
md = display(Latex(tabulate.tabulate(table, md = display(Latex(tabulate.tabulate(table,
tablefmt='latex', tablefmt='latex',
headers=headers))) headers=headers)))
``` ```
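A portability note: `scipy.stats.median_absolute_deviation`, imported at the top of the cell above, was deprecated in SciPy 1.5 and removed in 1.9. A version-tolerant import could look like the sketch below; note that the replacement defaults to `scale=1.0`, so `scale='normal'` is needed to reproduce the old 1.4826 normalisation.
``` python
try:
    from scipy.stats import median_abs_deviation

    def mad(a):
        # scale='normal' restores the factor the removed function applied by default
        return median_abs_deviation(a, scale='normal')
except ImportError:  # SciPy < 1.5
    from scipy.stats import median_absolute_deviation as mad
```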
......
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
# Jungfrau Dark Image Characterization # # Jungfrau Dark Image Characterization #
Author: European XFEL Detector Group, Version: 2.0 Author: European XFEL Detector Group, Version: 2.0
Analyzes Jungfrau dark image data to deduce offset, noise and resulting bad pixel maps Analyzes Jungfrau dark image data to deduce offset, noise and resulting bad pixel maps
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
in_folder = '/gpfs/exfel/exp/SPB/202130/p900204/raw/' # folder under which runs are located, required in_folder = '/gpfs/exfel/exp/SPB/202130/p900204/raw/' # folder under which runs are located, required
out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/remove' # path to place reports at, required out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/remove' # path to place reports at, required
run_high = 141 # run number for G0 dark run, required run_high = 141 # run number for G0 dark run, required
run_med = 142 # run number for G1 dark run, required run_med = 142 # run number for G1 dark run, required
run_low = 143 # run number for G2 dark run, required run_low = 143 # run number for G2 dark run, required
# Parameters used to access raw data. # Parameters used to access raw data.
karabo_da = ['JNGFR01', 'JNGFR02','JNGFR03','JNGFR04', 'JNGFR05', 'JNGFR06','JNGFR07','JNGFR08'] # list of data aggregators, which correspond to different JF modules karabo_da = ['JNGFR01', 'JNGFR02','JNGFR03','JNGFR04', 'JNGFR05', 'JNGFR06','JNGFR07','JNGFR08'] # list of data aggregators, which correspond to different JF modules
karabo_id = 'SPB_IRDA_JF4M' # karabo_id (detector identifier) prefix of Jungfrau detector to process. karabo_id = 'SPB_IRDA_JF4M' # karabo_id (detector identifier) prefix of Jungfrau detector to process.
karabo_id_control = '' # set to the control device's karabo_id if control is on a different ID; leave empty if it is the same as karabo_id karabo_id_control = '' # set to the control device's karabo_id if control is on a different ID; leave empty if it is the same as karabo_id
receiver_template = 'JNGFR{:02}' # inset for receiver devices receiver_template = 'JNGFR{:02}' # inset for receiver devices
instrument_source_template = '{}/DET/{}:daqOutput' # template for instrument source name (filled with karabo_id & receiver_id). e.g. 'SPB_IRDA_JF4M/DET/JNGFR01:daqOutput' instrument_source_template = '{}/DET/{}:daqOutput' # template for instrument source name (filled with karabo_id & receiver_id). e.g. 'SPB_IRDA_JF4M/DET/JNGFR01:daqOutput'
ctrl_source_template = '{}/DET/CONTROL' # template for control source name (filled with karabo_id_control) ctrl_source_template = '{}/DET/CONTROL' # template for control source name (filled with karabo_id_control)
# Parameters for calibration database and storing constants. # Parameters for calibration database and storing constants.
use_dir_creation_date = True # use dir creation date use_dir_creation_date = True # use dir creation date
cal_db_interface = 'tcp://max-exfl016:8016' # calibrate db interface to connect to cal_db_interface = 'tcp://max-exfl016:8016' # calibrate db interface to connect to
cal_db_timeout = 300000 # timeout on caldb requests cal_db_timeout = 300000 # timeout on caldb requests
local_output = True # output constants locally local_output = True # output constants locally
db_output = False # output constants to database db_output = False # output constants to database
# Parameters affecting creating dark calibration constants. # Parameters affecting creating dark calibration constants.
badpixel_threshold_sigma = 5. # bad pixels are values more than this many standard deviations from the median badpixel_threshold_sigma = 5. # bad pixels are values more than this many standard deviations from the median
offset_abs_threshold_low = [1000, 10000, 10000] # absolute bad pixel threshold in terms of offset, lower values offset_abs_threshold_low = [1000, 10000, 10000] # absolute bad pixel threshold in terms of offset, lower values
offset_abs_threshold_high = [8000, 15000, 15000] # absolute bad pixel threshold in terms of offset, upper values offset_abs_threshold_high = [8000, 15000, 15000] # absolute bad pixel threshold in terms of offset, upper values
max_trains = 0 # Maximum trains to process darks. Set to 0 to process all available train images. max_trains = 0 # Maximum trains to process darks. Set to 0 to process all available train images.
min_trains = 1 # Minimum number of trains that should be available to process dark constants. Default 1. min_trains = 1 # Minimum number of trains that should be available to process dark constants. Default 1.
manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values
time_limits = 0.025 # when looking up calibration constants later on, the integration time is allowed to vary by this margin (in us) time_limits = 0.025 # when looking up calibration constants later on, the integration time is allowed to vary by this margin (in us)
# Parameters to be used for injecting dark calibration constants. # Parameters to be used for injecting dark calibration constants.
integration_time = 1000 # integration time in us, will be overwritten by value in file integration_time = 1000 # integration time in us, will be overwritten by value in file
gain_setting = 0 # 0 for dynamic, forceswitchg1, forceswitchg2, 1 for dynamichg0, fixgain1, fixgain2. Will be overwritten by value in file gain_setting = 0 # 0 for dynamic, forceswitchg1, forceswitchg2, 1 for dynamichg0, fixgain1, fixgain2. Will be overwritten by value in file
gain_mode = 0 # 1 if medium and low runs are fixgain1 and fixgain2, otherwise 0. It will be overwritten by value in file, if manual_slow_data gain_mode = 0 # 1 if medium and low runs are fixgain1 and fixgain2, otherwise 0. It will be overwritten by value in file, if manual_slow_data
bias_voltage = 90 # sensor bias voltage in V, will be overwritten by value in file bias_voltage = 90 # sensor bias voltage in V, will be overwritten by value in file
memory_cells = 16 # number of memory cells memory_cells = 16 # number of memory cells
# TODO: this is used for only Warning check at AGIPD dark. # TODO: this is used for only Warning check at AGIPD dark.
# Need to rethink if it makes sense to use it here as well. # Need to rethink if it makes sense to use it here as well.
operation_mode = 'ADAPTIVE_GAIN' # Detector operation mode, optional operation_mode = 'ADAPTIVE_GAIN' # Detector operation mode, optional
# TODO: Remove # TODO: Remove
db_module = [""] # ID of module in calibration database. TODO: remove from calibration_configurations. db_module = [""] # ID of module in calibration database. TODO: remove from calibration_configurations.
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
import glob import glob
import os import os
import warnings import warnings
from pathlib import Path from pathlib import Path
warnings.filterwarnings('ignore') warnings.filterwarnings('ignore')
import matplotlib import matplotlib
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import multiprocessing import multiprocessing
import numpy as np import numpy as np
import pasha as psh import pasha as psh
from IPython.display import Markdown, display from IPython.display import Markdown, display
from extra_data import RunDirectory from extra_data import RunDirectory
matplotlib.use('agg') matplotlib.use('agg')
%matplotlib inline %matplotlib inline
from XFELDetAna.plotting.heatmap import heatmapPlot from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.histogram import histPlot from XFELDetAna.plotting.histogram import histPlot
from cal_tools import jungfraulib, step_timing from cal_tools import jungfraulib, step_timing
from cal_tools.ana_tools import save_dict_to_hdf5 from cal_tools.ana_tools import save_dict_to_hdf5
from cal_tools.enums import BadPixels, JungfrauSettings from cal_tools.enums import BadPixels, JungfrauSettings
from cal_tools.tools import ( from cal_tools.tools import (
get_dir_creation_date, get_dir_creation_date,
get_pdu_from_db, get_pdu_from_db,
get_random_db_interface, get_random_db_interface,
get_report, get_report,
save_const_to_h5, save_const_to_h5,
send_to_db, send_to_db,
) )
from iCalibrationDB import Conditions, Constants from iCalibrationDB import Conditions, Constants
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Constants relevant for the analysis # Constants relevant for the analysis
run_nums = [run_high, run_med, run_low] # run number for G0/HG0, G1, G2 run_nums = [run_high, run_med, run_low] # run number for G0/HG0, G1, G2
sensor_size = (1024, 512) sensor_size = (1024, 512)
gains = [0, 1, 2] gains = [0, 1, 2]
fixed_settings = [ fixed_settings = [
JungfrauSettings.FIX_GAIN_1.value, JungfrauSettings.FIX_GAIN_2.value] # noqa JungfrauSettings.FIX_GAIN_1.value, JungfrauSettings.FIX_GAIN_2.value] # noqa
dynamic_settings = [ dynamic_settings = [
JungfrauSettings.FORCE_SWITCH_HG1.value, JungfrauSettings.FORCE_SWITCH_HG2.value] # noqa JungfrauSettings.FORCE_SWITCH_HG1.value, JungfrauSettings.FORCE_SWITCH_HG2.value] # noqa
creation_time = None creation_time = None
if use_dir_creation_date: if use_dir_creation_date:
creation_time = get_dir_creation_date(in_folder, run_high) creation_time = get_dir_creation_date(in_folder, run_high)
print(f"Using {creation_time} as creation time") print(f"Using {creation_time} as creation time")
os.makedirs(out_folder, exist_ok=True) os.makedirs(out_folder, exist_ok=True)
cal_db_interface = get_random_db_interface(cal_db_interface) cal_db_interface = get_random_db_interface(cal_db_interface)
print(f'Calibration database interface: {cal_db_interface}') print(f'Calibration database interface: {cal_db_interface}')
if karabo_id_control == "": if karabo_id_control == "":
karabo_id_control = karabo_id karabo_id_control = karabo_id
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
proposal = list(filter(None, in_folder.strip('/').split('/')))[-2] proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]
file_loc = f"proposal:{proposal} runs:{run_high} {run_med} {run_low}" file_loc = f"proposal:{proposal} runs:{run_high} {run_med} {run_low}"
report = get_report(out_folder) report = get_report(out_folder)
step_timer = step_timing.StepTimer() step_timer = step_timing.StepTimer()
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## Reading control data ## Reading control data
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
step_timer.start() step_timer.start()
gain_runs = dict() gain_runs = dict()
med_low_settings = [] med_low_settings = []
ctrl_src = ctrl_source_template.format(karabo_id_control) ctrl_src = ctrl_source_template.format(karabo_id_control)
for gain, run_n in enumerate(run_nums): for gain, run_n in enumerate(run_nums):
run_dc = RunDirectory(f"{in_folder}/r{run_n:04d}/") run_dc = RunDirectory(f"{in_folder}/r{run_n:04d}/")
gain_runs[run_n] = [gain, run_dc] gain_runs[run_n] = [gain, run_dc]
ctrl_data = jungfraulib.JungfrauCtrl(run_dc, ctrl_src) ctrl_data = jungfraulib.JungfrauCtrl(run_dc, ctrl_src)
run_settings = ctrl_data.run_settings.value if ctrl_data.run_settings else ctrl_data.run_settings # noqa run_settings = ctrl_data.run_settings.value if ctrl_data.run_settings else ctrl_data.run_settings # noqa
# Read control data for the high gain run only. # Read control data for the high gain run only.
if run_n == run_high: if run_n == run_high:
run_mcells, sc_start = ctrl_data.get_memory_cells() run_mcells, sc_start = ctrl_data.get_memory_cells()
if not manual_slow_data: if not manual_slow_data:
integration_time = ctrl_data.get_integration_time() integration_time = ctrl_data.get_integration_time()
bias_voltage = ctrl_data.get_bias_voltage() bias_voltage = ctrl_data.get_bias_voltage()
gain_setting = ctrl_data.get_gain_setting() gain_setting = ctrl_data.get_gain_setting()
print(f"Gain setting is {gain_setting} ({run_settings})") print(f"Gain setting is {gain_setting} ({run_settings})")
print(f"Integration time is {integration_time} us") print(f"Integration time is {integration_time} us")
print(f"Bias voltage is {bias_voltage} V") print(f"Bias voltage is {bias_voltage} V")
if run_mcells == 1: if run_mcells == 1:
memory_cells = 1 memory_cells = 1
print('Dark runs in single cell mode, ' print('Dark runs in single cell mode, '
f'storage cell start: {sc_start:02d}') f'storage cell start: {sc_start:02d}')
else: else:
memory_cells = 16 memory_cells = 16
print('Dark runs in burst mode, ' print('Dark runs in burst mode, '
f'storage cell start: {sc_start:02d}') f'storage cell start: {sc_start:02d}')
else: else:
gain_mode = ctrl_data.get_gain_mode() gain_mode = ctrl_data.get_gain_mode()
med_low_settings.append(run_settings) med_low_settings.append(run_settings)
# A transparent workaround for old raw data with wrong/missing medium and low settings # A transparent workaround for old raw data with wrong/missing medium and low settings
if med_low_settings == [None, None]: if med_low_settings == [None, None]:
print("WARNING: run.settings is not stored in the data to read. " print("WARNING: run.settings is not stored in the data to read. "
f"Hence assuming gain_mode = {gain_mode} for adaptive old data.") f"Hence assuming gain_mode = {gain_mode} for adaptive old data.")
elif med_low_settings == ["dynamicgain", "forceswitchg1"]: elif med_low_settings == ["dynamicgain", "forceswitchg1"]:
print(f"WARNING: run.settings for medium and low gain runs are wrong {med_low_settings}. " print(f"WARNING: run.settings for medium and low gain runs are wrong {med_low_settings}. "
f"This is an expected bug for old raw data. Setting gain_mode to {gain_mode}.") f"This is an expected bug for old raw data. Setting gain_mode to {gain_mode}.")
# Validate that med_low_settings is not a mix of adaptive and fixed settings. # Validate that med_low_settings is not a mix of adaptive and fixed settings.
elif not (sorted(med_low_settings) in [fixed_settings, dynamic_settings]): # noqa elif not (sorted(med_low_settings) in [fixed_settings, dynamic_settings]): # noqa
raise ValueError( raise ValueError(
"Medium and low run settings are not as expected. " "Medium and low run settings are not as expected. "
f"Either {dynamic_settings} or {fixed_settings} are expected.\n" f"Either {dynamic_settings} or {fixed_settings} are expected.\n"
f"Got {sorted(med_low_settings)} for both runs, respectively.") f"Got {sorted(med_low_settings)} for both runs, respectively.")
print(f"Gain mode is {gain_mode} ({med_low_settings})") print(f"Gain mode is {gain_mode} ({med_low_settings})")
step_timer.done_step(f'Reading control data.') step_timer.done_step(f'Reading control data.')
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Use only high gain threshold for all gains in case of fixed_gain. # Use only high gain threshold for all gains in case of fixed_gain.
if gain_mode: # fixed_gain if gain_mode: # fixed_gain
offset_abs_threshold = [[offset_abs_threshold_low[0]]*3, [offset_abs_threshold_high[0]]*3] offset_abs_threshold = [[offset_abs_threshold_low[0]]*3, [offset_abs_threshold_high[0]]*3]
else: else:
offset_abs_threshold = [offset_abs_threshold_low, offset_abs_threshold_high] offset_abs_threshold = [offset_abs_threshold_low, offset_abs_threshold_high]
``` ```
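The nested threshold list is later broadcast against the `(x, y, cell, gain)` offset maps when flagging bad pixels; a quick shape check of that broadcast, using a toy zero array with the full sensor dimensions:
``` python
import numpy as np

thr = np.array([[1000, 10000, 10000],   # lower bounds per gain
                [8000, 15000, 15000]])  # upper bounds per gain
omap = np.zeros((1024, 512, 16, 3))     # (x, y, memory cells, gains)

# thr[0] has shape (3,); [None, None, None, :] aligns it with the gain axis.
below = omap < thr[0][None, None, None, :]
print(below.shape)  # (1024, 512, 16, 3)
```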
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
context = psh.context.ThreadContext(num_workers=multiprocessing.cpu_count()) context = psh.context.ThreadContext(num_workers=multiprocessing.cpu_count())
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
""" """
All Jungfrau runs are taken through one acquisition, except for the forceswitch runs. All Jungfrau runs are taken through one acquisition, except for the forceswitch runs.
For the non-fixed dark runs, a procedure of multiple acquisitions is used to switch the storage cell indices. For the non-fixed dark runs, a procedure of multiple acquisitions is used to switch the storage cell indices.
This applies only to the dynamic medium and low gain dark runs [forceswitchg1, forceswitchg2]: This applies only to the dynamic medium and low gain dark runs [forceswitchg1, forceswitchg2]:
switching the cell indices in burst mode is a workaround for a hardware deficiency switching the cell indices in burst mode is a workaround for a hardware deficiency
that produces wrong data for dark runs in all but the first storage cell. that produces wrong data for dark runs in all but the first storage cell.
This is why multiple acquisitions are taken, switching the used storage cells so as to This is why multiple acquisitions are taken, switching the used storage cells so as to
acquire data through two cells at a time for each of the 16 cells, instead of acquiring darks through all 16 cells at once. acquire data through two cells at a time for each of the 16 cells, instead of acquiring darks through all 16 cells at once.
""" """
print(f"Maximum trains to process is set to {max_trains}") print(f"Maximum trains to process is set to {max_trains}")
noise_map = dict() noise_map = dict()
offset_map = dict() offset_map = dict()
bad_pixels_map = dict() bad_pixels_map = dict()
for mod in karabo_da: for mod in karabo_da:
step_timer.start() step_timer.start()
instrument_src = instrument_source_template.format( instrument_src = instrument_source_template.format(
karabo_id, receiver_template.format(int(mod[-2:]))) karabo_id, receiver_template.format(int(mod[-2:])))
print(f"\n- Instrument data path for {mod} is {instrument_src}.") print(f"\n- Instrument data path for {mod} is {instrument_src}.")
offset_map[mod] = context.alloc(shape=(sensor_size+(memory_cells, 3)), fill=0) offset_map[mod] = context.alloc(shape=(sensor_size+(memory_cells, 3)), fill=0)
noise_map[mod] = context.alloc(like=offset_map[mod], fill=0) noise_map[mod] = context.alloc(like=offset_map[mod], fill=0)
bad_pixels_map[mod] = context.alloc(like=offset_map[mod], dtype=np.uint32, fill=0) bad_pixels_map[mod] = context.alloc(like=offset_map[mod], dtype=np.uint32, fill=0)
for run_n, [gain, run_dc] in gain_runs.items(): for run_n, [gain, run_dc] in gain_runs.items():
def process_cell(worker_id, array_index, cell_number): def process_cell(worker_id, array_index, cell_number):
cell_slice_idx = acelltable == cell_number cell_slice_idx = acelltable == cell_number
thiscell = images[..., cell_slice_idx] thiscell = images[..., cell_slice_idx]
offset_map[mod][..., cell_number, gain] = np.mean(thiscell, axis=2) offset_map[mod][..., cell_number, gain] = np.mean(thiscell, axis=2)
noise_map[mod][..., cell_number, gain] = np.std(thiscell, axis=2) noise_map[mod][..., cell_number, gain] = np.std(thiscell, axis=2)
# Check if there are wrong bad gain values. # Check if there are wrong bad gain values.
# Indicate pixels with wrong gain value across all trains for each cell. # Indicate pixels with wrong gain value across all trains for each cell.
bad_pixels_map[mod][ bad_pixels_map[mod][
np.average(gain_vals[..., cell_slice_idx], axis=2) != raw_g] |= BadPixels.WRONG_GAIN_VALUE.value np.average(gain_vals[..., cell_slice_idx], axis=2) != raw_g] |= BadPixels.WRONG_GAIN_VALUE.value
print(f"Gain stage {gain}, run {run_n}") print(f"Gain stage {gain}, run {run_n}")
# load shape of data for memory cells, and detector size (imgs, cells, x, y) # load shape of data for memory cells, and detector size (imgs, cells, x, y)
n_imgs = run_dc[instrument_src, "data.adc"].shape[0] n_imgs = run_dc[instrument_src, "data.adc"].shape[0]
# total number of trains available, including trains with empty data. # total number of trains available, including trains with empty data.
n_trains = len(run_dc.train_ids) n_trains = len(run_dc.train_ids)
instr_dc = run_dc.select(instrument_src, require_all=True) instr_dc = run_dc.select(instrument_src, require_all=True)
empty_trains = n_trains - n_imgs empty_trains = n_trains - n_imgs
if empty_trains != 0: if empty_trains != 0:
print( print(
f"\tWARNING: {mod} has {empty_trains} trains with empty data out of {n_trains} trains at " # noqa f"\tWARNING: {mod} has {empty_trains} trains with empty data out of {n_trains} trains at " # noqa
f"{Path(run_dc[instrument_src, 'data.adc'].files[0].filename).parent}.") f"{Path(run_dc[instrument_src, 'data.adc'].files[0].filename).parent}.")
if max_trains > 0: if max_trains > 0:
n_imgs = min(n_imgs, max_trains) n_imgs = min(n_imgs, max_trains)
print(f"Processing {n_imgs} images.") print(f"Processing {n_imgs} images.")
# Select only requested number of images to process darks. # Select only requested number of images to process darks.
instr_dc = instr_dc.select_trains(np.s_[:n_imgs]) instr_dc = instr_dc.select_trains(np.s_[:n_imgs])
if n_imgs < min_trains: if n_imgs < min_trains:
raise ValueError( raise ValueError(
f"Less than {min_trains} trains are available in RAW data." f"Less than {min_trains} trains are available in RAW data."
" Not enough data to process darks.") " Not enough data to process darks.")
images = np.transpose( images = np.transpose(
instr_dc[instrument_src, "data.adc"].ndarray().astype(np.float64), (3, 2, 1, 0)) instr_dc[instrument_src, "data.adc"].ndarray(), (3, 2, 1, 0))
acelltable = np.transpose(instr_dc[instrument_src, "data.memoryCell"].ndarray()) acelltable = np.transpose(instr_dc[instrument_src, "data.memoryCell"].ndarray())
gain_vals = np.transpose( gain_vals = np.transpose(
instr_dc[instrument_src, "data.gain"].ndarray(), (3, 2, 1, 0)) instr_dc[instrument_src, "data.gain"].ndarray(), (3, 2, 1, 0))
# define gain value as saved in raw gain map # define gain value as saved in raw gain map
raw_g = 3 if gain == 2 else gain raw_g = 3 if gain == 2 else gain
if memory_cells == 1: if memory_cells == 1:
acelltable -= sc_start acelltable -= sc_start
# Only for dynamic medium and low gain runs [forceswitchg1, forceswitchg2] in burst mode. # Only for dynamic medium and low gain runs [forceswitchg1, forceswitchg2] in burst mode.
if gain_mode == 0 and gain > 0 and memory_cells == 16: if gain_mode == 0 and gain > 0 and memory_cells == 16:
# Set cells after the first to 255, mirroring the receiver, which # Set cells after the first to 255, mirroring the receiver, which
# uses the value 255 to indicate a cell without an image. # uses the value 255 to indicate a cell without an image.
# image shape for forceswitchg1 and forceswitchg2 = (1024, 512, 2, trains) # image shape for forceswitchg1 and forceswitchg2 = (1024, 512, 2, trains)
# compared to expected shape of (1024, 512, 16, trains) for high gain run. # compared to expected shape of (1024, 512, 16, trains) for high gain run.
acelltable[1:] = 255 acelltable[1:] = 255
# Calculate offset and noise maps # Calculate offset and noise maps
context.map(process_cell, range(memory_cells)) context.map(process_cell, range(memory_cells))
step_timer.done_step(f'Creating Offset and noise constants for a module.') step_timer.done_step(f'Creating Offset and noise constants for a module.')
``` ```
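The loop above relies on pasha's shared-memory pattern: output arrays are allocated through the context, and each `context.map` worker fills only its own disjoint slice, so no locking is needed. A stripped-down sketch of the same pattern, with toy shapes and hypothetical names:
``` python
import multiprocessing
import numpy as np
import pasha as psh

ctx = psh.context.ThreadContext(num_workers=multiprocessing.cpu_count())

data = np.random.default_rng(1).normal(size=(16, 256, 256))  # (cells, y, x)
cell_means = ctx.alloc(shape=(16,), fill=0)

def per_cell(worker_id, index, cell):
    # Each worker writes a disjoint slot of the shared array.
    cell_means[cell] = data[cell].mean()

ctx.map(per_cell, range(16))
print(cell_means[:4])
```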
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## Offset and Noise Maps ## ## Offset and Noise Maps ##
Below, the offset and noise maps for the high gain stage ($g_0$) are shown, alongside the distribution of these values. One expects block-like structures corresponding to the ASICs of the detector. Below, the offset and noise maps for the high gain stage ($g_0$) are shown, alongside the distribution of these values. One expects block-like structures corresponding to the ASICs of the detector.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
g_name = ['G0', 'G1', 'G2'] g_name = ['G0', 'G1', 'G2']
g_range = [(0, 8000), (8000, 16000), (8000, 16000)] g_range = [(0, 8000), (8000, 16000), (8000, 16000)]
n_range = [(0., 50.), (0., 50.), (0., 50.)] n_range = [(0., 50.), (0., 50.), (0., 50.)]
unit = '[ADCu]' unit = '[ADCu]'
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# TODO: Fix plot arrangement and speed for Jungfrau burst mode. # TODO: Fix plot arrangement and speed for Jungfrau burst mode.
step_timer.start() step_timer.start()
for mod in karabo_da: for mod in karabo_da:
for g_idx in gains: for g_idx in gains:
for cell in range(0, memory_cells): for cell in range(0, memory_cells):
f_o0 = heatmapPlot( f_o0 = heatmapPlot(
np.swapaxes(offset_map[mod][..., cell, g_idx], 0, 1), np.swapaxes(offset_map[mod][..., cell, g_idx], 0, 1),
y_label="Row", y_label="Row",
x_label="Column", x_label="Column",
lut_label=unit, lut_label=unit,
aspect=1., aspect=1.,
vmin=g_range[g_idx][0], vmin=g_range[g_idx][0],
vmax=g_range[g_idx][1], vmax=g_range[g_idx][1],
title=f'Pedestal {g_name[g_idx]} - Cell {cell:02d} - Module {mod}') title=f'Pedestal {g_name[g_idx]} - Cell {cell:02d} - Module {mod}')
fo0, ax_o0 = plt.subplots() fo0, ax_o0 = plt.subplots()
res_o0 = histPlot( res_o0 = histPlot(
ax_o0, offset_map[mod][..., cell, g_idx], ax_o0, offset_map[mod][..., cell, g_idx],
bins=800, bins=800,
range=g_range[g_idx], range=g_range[g_idx],
facecolor='b', facecolor='b',
histotype='stepfilled', histotype='stepfilled',
) )
ax_o0.tick_params(axis='both',which='major',labelsize=15) ax_o0.tick_params(axis='both',which='major',labelsize=15)
ax_o0.set_title( ax_o0.set_title(
f'Module pedestal distribution - Cell {cell:02d} - Module {mod}', f'Module pedestal distribution - Cell {cell:02d} - Module {mod}',
fontsize=15) fontsize=15)
ax_o0.set_xlabel(f'Pedestal {g_name[g_idx]} {unit}',fontsize=15) ax_o0.set_xlabel(f'Pedestal {g_name[g_idx]} {unit}',fontsize=15)
ax_o0.set_yscale('log') ax_o0.set_yscale('log')
f_n0 = heatmapPlot( f_n0 = heatmapPlot(
np.swapaxes(noise_map[mod][..., cell, g_idx], 0, 1), np.swapaxes(noise_map[mod][..., cell, g_idx], 0, 1),
y_label="Row", y_label="Row",
x_label="Column", x_label="Column",
lut_label= unit, lut_label= unit,
aspect=1., aspect=1.,
vmin=n_range[g_idx][0], vmin=n_range[g_idx][0],
vmax=n_range[g_idx][1], vmax=n_range[g_idx][1],
title=f"RMS noise {g_name[g_idx]} - Cell {cell:02d} - Module {mod}", title=f"RMS noise {g_name[g_idx]} - Cell {cell:02d} - Module {mod}",
) )
fn0, ax_n0 = plt.subplots() fn0, ax_n0 = plt.subplots()
res_n0 = histPlot( res_n0 = histPlot(
ax_n0, ax_n0,
noise_map[mod][..., cell, g_idx], noise_map[mod][..., cell, g_idx],
bins=100, bins=100,
range=n_range[g_idx], range=n_range[g_idx],
facecolor='b', facecolor='b',
histotype='stepfilled', histotype='stepfilled',
) )
ax_n0.tick_params(axis='both', which='major', labelsize=15) ax_n0.tick_params(axis='both', which='major', labelsize=15)
ax_n0.set_title( ax_n0.set_title(
f'Module noise distribution - Cell {cell:02d} - Module {mod}', f'Module noise distribution - Cell {cell:02d} - Module {mod}',
fontsize=15) fontsize=15)
ax_n0.set_xlabel( ax_n0.set_xlabel(
f'RMS noise {g_name[g_idx]} ' + unit, fontsize=15) f'RMS noise {g_name[g_idx]} ' + unit, fontsize=15)
plt.show() plt.show()
step_timer.done_step(f'Plotting offset and noise maps.') step_timer.done_step(f'Plotting offset and noise maps.')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## Bad Pixel Map ## ## Bad Pixel Map ##
The bad pixel map is deduced by comparing offset and noise of each pixel ($v_i$) and each gain ($g$) against the median value for that gain stage: The bad pixel map is deduced by comparing offset and noise of each pixel ($v_i$) and each gain ($g$) against the median value for that gain stage:
$$ $$
v_i > \mathrm{median}(v_{k,g}) + n \sigma_{v_{k,g}} v_i > \mathrm{median}(v_{k,g}) + n \sigma_{v_{k,g}}
$$ $$
or or
$$ $$
v_i < \mathrm{median}(v_{k,g}) - n \sigma_{v_{k,g}} v_i < \mathrm{median}(v_{k,g}) - n \sigma_{v_{k,g}}
$$ $$
Values are encoded in a 32-bit mask, where, for bad pixels deduced from dark images, the following non-zero entries are relevant: Values are encoded in a 32-bit mask, where, for bad pixels deduced from dark images, the following non-zero entries are relevant:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
def print_bp_entry(bp): def print_bp_entry(bp):
print("{:<30s} {:032b} -> {}".format(bp.name, bp.value, int(bp.value))) print("{:<30s} {:032b} -> {}".format(bp.name, bp.value, int(bp.value)))
print_bp_entry(BadPixels.OFFSET_OUT_OF_THRESHOLD) print_bp_entry(BadPixels.OFFSET_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.NOISE_OUT_OF_THRESHOLD) print_bp_entry(BadPixels.NOISE_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.OFFSET_NOISE_EVAL_ERROR) print_bp_entry(BadPixels.OFFSET_NOISE_EVAL_ERROR)
print_bp_entry(BadPixels.WRONG_GAIN_VALUE) print_bp_entry(BadPixels.WRONG_GAIN_VALUE)
def eval_bpidx(d): def eval_bpidx(d):
mdn = np.nanmedian(d, axis=(0, 1))[None, None, :, :] mdn = np.nanmedian(d, axis=(0, 1))[None, None, :, :]
std = np.nanstd(d, axis=(0, 1))[None, None, :, :] std = np.nanstd(d, axis=(0, 1))[None, None, :, :]
idx = (d > badpixel_threshold_sigma*std+mdn) | (d < (-badpixel_threshold_sigma)*std+mdn) idx = (d > badpixel_threshold_sigma*std+mdn) | (d < (-badpixel_threshold_sigma)*std+mdn)
return idx return idx
``` ```
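Because the flags are independent bits, any stored mask value can be decoded back into its constituent entries with bitwise tests. A small sketch (the combined pixel value is hypothetical) using the `BadPixels` enum imported at the top of the notebook:
``` python
from cal_tools.enums import BadPixels  # as imported above

# Hypothetical combined mask value for a single pixel
pixel_mask = BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.WRONG_GAIN_VALUE.value

flags = [bp.name for bp in (BadPixels.OFFSET_OUT_OF_THRESHOLD,
                            BadPixels.NOISE_OUT_OF_THRESHOLD,
                            BadPixels.OFFSET_NOISE_EVAL_ERROR,
                            BadPixels.WRONG_GAIN_VALUE)
         if pixel_mask & bp.value]
print(flags)  # ['OFFSET_OUT_OF_THRESHOLD', 'WRONG_GAIN_VALUE']
```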
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
step_timer.start() step_timer.start()
for mod in karabo_da: for mod in karabo_da:
display(Markdown(f"### Badpixels for module {mod}:")) display(Markdown(f"### Badpixels for module {mod}:"))
offset_abs_threshold = np.array(offset_abs_threshold) offset_abs_threshold = np.array(offset_abs_threshold)
bad_pixels_map[mod][eval_bpidx(offset_map[mod])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value bad_pixels_map[mod][eval_bpidx(offset_map[mod])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value
bad_pixels_map[mod][~np.isfinite(offset_map[mod])] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value bad_pixels_map[mod][~np.isfinite(offset_map[mod])] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
bad_pixels_map[mod][eval_bpidx(noise_map[mod])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value bad_pixels_map[mod][eval_bpidx(noise_map[mod])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value
bad_pixels_map[mod][~np.isfinite(noise_map[mod])] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value bad_pixels_map[mod][~np.isfinite(noise_map[mod])] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
bad_pixels_map[mod][(offset_map[mod] < offset_abs_threshold[0][None, None, None, :]) | (offset_map[mod] > offset_abs_threshold[1][None, None, None, :])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value # noqa bad_pixels_map[mod][(offset_map[mod] < offset_abs_threshold[0][None, None, None, :]) | (offset_map[mod] > offset_abs_threshold[1][None, None, None, :])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value # noqa
for g_idx in gains: for g_idx in gains:
for cell in range(memory_cells): for cell in range(memory_cells):
bad_pixels = bad_pixels_map[mod][:, :, cell, g_idx] bad_pixels = bad_pixels_map[mod][:, :, cell, g_idx]
fn_0 = heatmapPlot( fn_0 = heatmapPlot(
np.swapaxes(bad_pixels, 0, 1), np.swapaxes(bad_pixels, 0, 1),
y_label="Row", y_label="Row",
x_label="Column", x_label="Column",
lut_label=f"Badpixels {g_name[g_idx]} [ADCu]", lut_label=f"Badpixels {g_name[g_idx]} [ADCu]",
aspect=1., aspect=1.,
vmin=0, vmax=5, vmin=0, vmax=5,
title=f'G{g_idx} Bad pixel map - Cell {cell:02d} - Module {mod}') title=f'G{g_idx} Bad pixel map - Cell {cell:02d} - Module {mod}')
step_timer.done_step(f'Creating bad pixels constant and plotting it for a module.') step_timer.done_step(f'Creating bad pixels constant and plotting it for a module.')
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# set the operating condition # set the operating condition
condition = Conditions.Dark.jungfrau( condition = Conditions.Dark.jungfrau(
memory_cells=memory_cells, memory_cells=memory_cells,
bias_voltage=bias_voltage, bias_voltage=bias_voltage,
integration_time=integration_time, integration_time=integration_time,
gain_setting=gain_setting, gain_setting=gain_setting,
gain_mode=gain_mode, gain_mode=gain_mode,
) )
db_modules = get_pdu_from_db( db_modules = get_pdu_from_db(
karabo_id=karabo_id, karabo_id=karabo_id,
karabo_da=karabo_da, karabo_da=karabo_da,
constant=Constants.jungfrau.Offset(), constant=Constants.jungfrau.Offset(),
condition=condition, condition=condition,
cal_db_interface=cal_db_interface, cal_db_interface=cal_db_interface,
snapshot_at=creation_time) snapshot_at=creation_time)
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## Inject and save calibration constants ## Inject and save calibration constants
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
step_timer.start() step_timer.start()
for mod, db_mod in zip(karabo_da, db_modules): for mod, db_mod in zip(karabo_da, db_modules):
constants = { constants = {
'Offset': np.moveaxis(offset_map[mod], 0, 1), 'Offset': np.moveaxis(offset_map[mod], 0, 1),
'Noise': np.moveaxis(noise_map[mod], 0, 1), 'Noise': np.moveaxis(noise_map[mod], 0, 1),
'BadPixelsDark': np.moveaxis(bad_pixels_map[mod], 0, 1), 'BadPixelsDark': np.moveaxis(bad_pixels_map[mod], 0, 1),
} }
md = None md = None
for key, const_data in constants.items(): for key, const_data in constants.items():
const = getattr(Constants.jungfrau, key)() const = getattr(Constants.jungfrau, key)()
const.data = const_data const.data = const_data
for parm in condition.parameters: for parm in condition.parameters:
if parm.name == "Integration Time": if parm.name == "Integration Time":
parm.lower_deviation = time_limits parm.lower_deviation = time_limits
parm.upper_deviation = time_limits parm.upper_deviation = time_limits
if db_output: if db_output:
md = send_to_db( md = send_to_db(
db_module=db_mod, db_module=db_mod,
karabo_id=karabo_id, karabo_id=karabo_id,
constant=const, constant=const,
condition=condition, condition=condition,
file_loc=file_loc, file_loc=file_loc,
report_path=report, report_path=report,
cal_db_interface=cal_db_interface, cal_db_interface=cal_db_interface,
creation_time=creation_time, creation_time=creation_time,
timeout=cal_db_timeout, timeout=cal_db_timeout,
) )
if local_output: if local_output:
md = save_const_to_h5( md = save_const_to_h5(
db_module=db_mod, db_module=db_mod,
karabo_id=karabo_id, karabo_id=karabo_id,
constant=const, constant=const,
condition=condition, condition=condition,
data=const.data, data=const.data,
file_loc=file_loc, file_loc=file_loc,
report=report, report=report,
creation_time=creation_time, creation_time=creation_time,
out_folder=out_folder, out_folder=out_folder,
) )
print(f"Calibration constant {key} is stored locally at {out_folder}.\n") print(f"Calibration constant {key} is stored locally at {out_folder}.\n")
print("Constants parameter conditions are:\n") print("Constants parameter conditions are:\n")
print( print(
f"• Bias voltage: {bias_voltage}\n" f"• Bias voltage: {bias_voltage}\n"
f"• Memory cells: {memory_cells}\n" f"• Memory cells: {memory_cells}\n"
f"• Integration time: {integration_time}\n" f"• Integration time: {integration_time}\n"
f"• Gain setting: {gain_setting}\n" f"• Gain setting: {gain_setting}\n"
f"• Gain mode: {gain_mode}\n" f"• Gain mode: {gain_mode}\n"
f"• Creation time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\n") # noqa f"• Creation time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\n") # noqa
step_timer.done_step("Injecting constants.") step_timer.done_step("Injecting constants.")
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
print(f"Total processing time {step_timer.timespan():.01f} s") print(f"Total processing time {step_timer.timespan():.01f} s")
step_timer.print_summary() step_timer.print_summary()
``` ```
......
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
#Author: K. Ahmed, M. Karnevsky, Version: 0.1 # Author: European XFEL Detector Group, Version: 1.0
#The following is a summary for the processing of dark images and calibration constants production.
# Summary of processed dark calibration constants and a comparison with previously injected constants.
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/fixed_gain/SPB_summary_fix2" # path to output to, required out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/fixed_gain/SPB_summary_fix2" # path to output to, required
karabo_id = "SPB_DET_AGIPD1M-1" # detector instance karabo_id = "SPB_DET_AGIPD1M-1" # detector instance
gain_names = ['High gain', 'Medium gain', 'Low gain'] # a list of gain names to be used in plotting gain_names = ['High gain', 'Medium gain', 'Low gain'] # a list of gain names to be used in plotting
threshold_names = ['HG-MG threshold', 'MG-LG threshold'] # a list of threshold names to be used in plotting threshold_names = ['HG-MG threshold', 'MG-LG threshold'] # a list of threshold names to be used in plotting
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
import copy import copy
import os import os
import warnings import warnings
from collections import OrderedDict from collections import OrderedDict
from datetime import datetime
from pathlib import Path from pathlib import Path
warnings.filterwarnings('ignore') warnings.filterwarnings('ignore')
import glob import glob
import h5py import h5py
import matplotlib import matplotlib
import numpy as np import numpy as np
import yaml import yaml
from IPython.display import Latex, Markdown, display from IPython.display import Latex, Markdown, display
matplotlib.use("agg") matplotlib.use("agg")
import matplotlib.gridspec as gridspec import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
%matplotlib inline %matplotlib inline
import extra_geom import extra_geom
import tabulate import tabulate
from cal_tools.ana_tools import get_range from cal_tools.ana_tools import get_range
from cal_tools.plotting import show_processed_modules from cal_tools.plotting import show_processed_modules
from cal_tools.tools import CalibrationMetadata, module_index_to_qm from cal_tools.tools import CalibrationMetadata, module_index_to_qm
from iCalibrationDB import Detectors
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.simpleplot import simplePlot from XFELDetAna.plotting.simpleplot import simplePlot
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Note: this notebook assumes that local_output was set to True in the preceding characterization notebook # Note: this notebook assumes that local_output was set to True in the preceding characterization notebook
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
if "AGIPD" in karabo_id: if "AGIPD" in karabo_id:
if "SPB" in karabo_id: if "SPB" in karabo_id:
dinstance = "AGIPD1M1" dinstance = "AGIPD1M1"
nmods = 16 nmods = 16
elif "MID" in karabo_id: elif "MID" in karabo_id:
dinstance = "AGIPD1M2" dinstance = "AGIPD1M2"
nmods = 16 nmods = 16
elif "HED" in karabo_id: elif "HED" in karabo_id:
dinstance = "AGIPD500K" dinstance = "AGIPD500K"
nmods = 8 nmods = 8
display(Markdown(""" display(Markdown("""
# Summary of AGIPD dark characterization # # Summary of AGIPD dark characterization #
The following report shows a set of dark images taken with the AGIPD detector to deduce detector offsets, noise, bad-pixel maps and thresholding. All four types of constants are evaluated per-pixel and per-memory cell. The following report shows a set of dark images taken with the AGIPD detector to deduce detector offsets, noise, bad-pixel maps and thresholding. All four types of constants are evaluated per-pixel and per-memory cell.
**The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$). **The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$).
**The noise** $N$ is the standard deviation $\sigma$ of the dark signal. **The noise** $N$ is the standard deviation $\sigma$ of the dark signal.
$$ O_{x,y,c} = M(Ds)_{t} ,\,\,\,\,\,\, N_{x,y,c} = \sigma(Ds)_{t}$$ $$ O_{x,y,c} = M(Ds)_{t} ,\,\,\,\,\,\, N_{x,y,c} = \sigma(Ds)_{t}$$
**The bad pixel** mask is encoded as a bit mask. **The bad pixel** mask is encoded as a bit mask.
**"OFFSET_OUT_OF_THRESHOLD":** **"OFFSET_OUT_OF_THRESHOLD":**
Offset outside of bounds: Offset outside of bounds:
$$M(O)_{x,y} - \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} < O < M(O)_{x,y} + \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} $$ $$M(O)_{x,y} - \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} < O < M(O)_{x,y} + \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} $$
or offset outside of hard limits or offset outside of hard limits
$$ \mathrm{thresholds\_offset\_hard}_\mathrm{low} < O < \mathrm{thresholds\_offset\_hard}_\mathrm{high} $$ $$ \mathrm{thresholds\_offset\_hard}_\mathrm{low} < O < \mathrm{thresholds\_offset\_hard}_\mathrm{high} $$
**"NOISE_OUT_OF_THRESHOLD":** **"NOISE_OUT_OF_THRESHOLD":**
Noise outside of bounds: Noise outside of bounds:
$$M(N)_{x,y} - \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} < N < M(N)_{x,y} + \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} $$ $$M(N)_{x,y} - \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} < N < M(N)_{x,y} + \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} $$
or noise outside of hard limits or noise outside of hard limits
$$\mathrm{thresholds\_noise\_hard}_\mathrm{low} < N < \mathrm{thresholds\_noise\_hard}_\mathrm{high} $$ $$\mathrm{thresholds\_noise\_hard}_\mathrm{low} < N < \mathrm{thresholds\_noise\_hard}_\mathrm{high} $$
**"OFFSET_NOISE_EVAL_ERROR":** **"OFFSET_NOISE_EVAL_ERROR":**
Set when the offset or the noise evaluates to $nan$ (not a number). Set when the offset or the noise evaluates to $nan$ (not a number).
Values: $\mathrm{thresholds\_offset\_sigma}$, $\mathrm{thresholds\_offset\_hard}$, $\mathrm{thresholds\_noise\_sigma}$, $\mathrm{thresholds\_noise\_hard}$ are given as parameters. Values: $\mathrm{thresholds\_offset\_sigma}$, $\mathrm{thresholds\_offset\_hard}$, $\mathrm{thresholds\_noise\_sigma}$, $\mathrm{thresholds\_noise\_hard}$ are given as parameters.
"**\"GAIN_THRESHOLDING_ERROR\":** "**\"GAIN_THRESHOLDING_ERROR\":**
Bad gain separated pixels with sigma separation less than gain_separation_sigma_threshold Bad gain separated pixels with sigma separation less than gain_separation_sigma_threshold
$$ sigma\_separation = \\frac{\mathrm{gain\_offset} - \mathrm{previous\_gain\_offset}}{\sqrt{\mathrm{gain\_offset_{std}}^\mathrm{2} + \mathrm{previuos\_gain\_offset_{std}}^\mathrm{2}}}$$ $$ sigma\_separation = \\frac{\mathrm{gain\_offset} - \mathrm{previous\_gain\_offset}}{\sqrt{\mathrm{gain\_offset_{std}}^\mathrm{2} + \mathrm{previuos\_gain\_offset_{std}}^\mathrm{2}}}$$
$$ Bad\_separation = sigma\_separation < \mathrm{gain\_separation\_sigma\_threshold} $$ $$ Bad\_separation = sigma\_separation < \mathrm{gain\_separation\_sigma\_threshold} $$
""")) """))
elif "LPD" in karabo_id: elif "LPD" in karabo_id:
dinstance = "LPD1M1" dinstance = "LPD1M1"
nmods = 16 nmods = 16
display(Markdown(""" display(Markdown("""
# Summary of LPD dark characterization # # Summary of LPD dark characterization #
The following report shows a set of dark images taken with the LPD detector to deduce detector offsets, noise, bad-pixel maps. All three types of constants are evaluated per-pixel and per-memory cell. The following report shows a set of dark images taken with the LPD detector to deduce detector offsets, noise, bad-pixel maps. All three types of constants are evaluated per-pixel and per-memory cell.
**The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$). **The offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$).
**The noise** $N$ is the standard deviation $\sigma$ of the dark signal. **The noise** $N$ is the standard deviation $\sigma$ of the dark signal.
$$ O_{x,y,c} = M(Ds)_{t} ,\,\,\,\,\,\, N_{x,y,c} = \sigma(Ds)_{t}$$ $$ O_{x,y,c} = M(Ds)_{t} ,\,\,\,\,\,\, N_{x,y,c} = \sigma(Ds)_{t}$$
**The bad pixel** mask is encoded as a bit mask. **The bad pixel** mask is encoded as a bit mask.
**"OFFSET_OUT_OF_THRESHOLD":** **"OFFSET_OUT_OF_THRESHOLD":**
Offset outside of bounds: Offset outside of bounds:
$$M(O)_{x,y} - \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} < O < M(O)_{x,y} + \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} $$ $$M(O)_{x,y} - \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} < O < M(O)_{x,y} + \sigma(O)_{x,y} * \mathrm{thresholds\_offset\_sigma} $$
or offset outside of hard limits or offset outside of hard limits
$$ \mathrm{thresholds\_offset\_hard}_\mathrm{low} < O < \mathrm{thresholds\_offset\_hard}_\mathrm{high} $$ $$ \mathrm{thresholds\_offset\_hard}_\mathrm{low} < O < \mathrm{thresholds\_offset\_hard}_\mathrm{high} $$
**"NOISE_OUT_OF_THRESHOLD":** **"NOISE_OUT_OF_THRESHOLD":**
Noise outside of bounds: Noise outside of bounds:
$$M(N)_{x,y} - \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} < N < M(N)_{x,y} + \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} $$ $$M(N)_{x,y} - \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} < N < M(N)_{x,y} + \sigma(N)_{x,y} * \mathrm{thresholds\_noise\_sigma} $$
or noise outside of hard limits or noise outside of hard limits
$$\mathrm{thresholds\_noise\_hard}_\mathrm{low} < N < \mathrm{thresholds\_noise\_hard}_\mathrm{high} $$ $$\mathrm{thresholds\_noise\_hard}_\mathrm{low} < N < \mathrm{thresholds\_noise\_hard}_\mathrm{high} $$
**"OFFSET_NOISE_EVAL_ERROR":** **"OFFSET_NOISE_EVAL_ERROR":**
Set when the offset or the noise evaluates to $nan$ (not a number). Set when the offset or the noise evaluates to $nan$ (not a number).
"Values: $\\mathrm{thresholds\\_offset\\_sigma}$, $\\mathrm{thresholds\\_offset\\_hard}$, $\\mathrm{thresholds\\_noise\\_sigma}$, $\\mathrm{thresholds\\_noise\\_hard}$ are given as parameters.\n", "Values: $\\mathrm{thresholds\\_offset\\_sigma}$, $\\mathrm{thresholds\\_offset\\_hard}$, $\\mathrm{thresholds\\_noise\\_sigma}$, $\\mathrm{thresholds\\_noise\\_hard}$ are given as parameters.\n",
""")) """))
elif "DSSC" in karabo_id: elif "DSSC" in karabo_id:
dinstance = "DSSC1M1" dinstance = "DSSC1M1"
nmods = 16 nmods = 16
display(Markdown(""" display(Markdown("""
# Summary of DSSC dark characterization # # Summary of DSSC dark characterization #
""")) """))
``` ```
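For concreteness, the offset and noise definitions above can be written out on a toy dark stack. A minimal sketch (synthetic data, all numbers invented) following the formulas $O_{x,y,c} = M(Ds)_t$ and $N_{x,y,c} = \sigma(Ds)_t$ and the sigma-bound bad-pixel criterion:
``` python
import numpy as np

rng = np.random.default_rng(2)
darks = rng.normal(loc=5000.0, scale=18.0, size=(500, 4, 16, 16))  # (trains, cells, y, x)

offset = np.median(darks, axis=0)  # O_{x,y,c}: median over trains
noise = np.std(darks, axis=0)      # N_{x,y,c}: std over trains

n_sigma = 3.0                      # stands in for thresholds_offset_sigma
lo = np.median(offset) - n_sigma * np.std(offset)
hi = np.median(offset) + n_sigma * np.std(offset)
bad = (offset < lo) | (offset > hi)
print(f"{bad.mean():.2%} of pixels flagged")
```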
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
out_folder = Path(out_folder) out_folder = Path(out_folder)
metadata = CalibrationMetadata(out_folder) metadata = CalibrationMetadata(out_folder)
mod_mapping = metadata.setdefault("modules-mapping", {}) mod_mapping = metadata.setdefault("modules-mapping", {})
old_constant_metadata = {} old_constant_metadata = {}
for fn in out_folder.glob("module_metadata_*.yml"): for fn in out_folder.glob("module_metadata_*.yml"):
with fn.open("r") as fd: with fn.open("r") as fd:
fdict = yaml.safe_load(fd) fdict = yaml.safe_load(fd)
module = fdict["module"] module = fdict["module"]
mod_mapping[module] = fdict["pdu"] mod_mapping[module] = fdict["pdu"]
old_constant_metadata[module] = fdict["old-constants"] old_constant_metadata[module] = fdict["old-constants"]
fn.unlink() fn.unlink()
metadata.save() metadata.save()
``` ```
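The per-module files consumed above are small YAML documents with `module`, `pdu` and `old-constants` keys, the latter holding a `filepath`/`h5path` pair per constant (as read further below). A hypothetical example of the expected structure, with invented values:
``` python
import yaml

example = yaml.safe_load("""
module: Q1M1
pdu: AGIPD_SIV1_AGIPDV11_M001   # physical detector unit (invented name)
old-constants:
  Offset:
    filepath: /gpfs/exfel/cal/store/some_old_constant.h5   # invented path
    h5path: /AGIPD_SIV1_AGIPDV11_M001/Offset/0
""")
print(example["module"], "->", example["pdu"])
```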
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
Prepare the newly injected and the previous constants from the local files produced in out_folder. Prepare the newly injected and the previous constants from the local files produced in out_folder.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# TODO: After changes in the Cal DB interface read files from cal repository # TODO: After changes in the Cal DB interface read files from cal repository
# Load constants from local files # Load constants from local files
data = OrderedDict() data = OrderedDict()
old_cons = OrderedDict() old_cons = OrderedDict()
mod_names = [] mod_names = []
# Loop over modules # Loop over modules
for i in range(nmods): for i in range(nmods):
qm = module_index_to_qm(i) qm = module_index_to_qm(i)
if not mod_mapping.get(qm): if not mod_mapping.get(qm):
continue continue
mod_pdu = mod_mapping[qm] mod_pdu = mod_mapping[qm]
# loop over constants # loop over constants
for const in ['Offset', 'Noise', 'ThresholdsDark', 'BadPixelsDark']: for const in ['Offset', 'Noise', 'ThresholdsDark', 'BadPixelsDark']:
# first load new constant # first load new constant
fpath = out_folder / f"const_{const}_{mod_pdu}.h5" fpath = out_folder / f"const_{const}_{mod_pdu}.h5"
if not fpath.exists(): if not fpath.exists():
print(f"No local output file {fpath} found") print(f"No local output file {fpath} found")
continue continue
with h5py.File(fpath, 'r') as f: with h5py.File(fpath, 'r') as f:
if qm not in data: if qm not in data:
mod_names.append(qm) mod_names.append(qm)
data[qm] = OrderedDict() data[qm] = OrderedDict()
data[qm][const] = f["data"][()] data[qm][const] = f["data"][()]
# try finding old constants using paths from CalCat store # try finding old constants using paths from CalCat store
qm_mdata = old_constant_metadata[qm] qm_mdata = old_constant_metadata[qm]
if const not in qm_mdata: if const not in qm_mdata:
continue continue
filepath = qm_mdata[const]["filepath"] filepath = qm_mdata[const]["filepath"]
h5path = qm_mdata[const]["h5path"] h5path = qm_mdata[const]["h5path"]
if not filepath or not h5path: if not filepath or not h5path:
continue continue
with h5py.File(filepath, "r") as fd: with h5py.File(filepath, "r") as fd:
old_cons.setdefault(qm, OrderedDict())[const] = fd[f"{h5path}/data"][:] old_cons.setdefault(qm, OrderedDict())[const] = fd[f"{h5path}/data"][:]
``` ```
%% Cell type:code id: tags:
``` python
cons_shape = {}
# Extract each constant's shape; shapes are identical across modules,
# so only the first module needs to be inspected.
for qm, constant in data.items():
    for cname, cons in constant.items():
        shape = data[qm][cname].shape
        if cname not in cons_shape:
            cons_shape[cname] = shape
    break

constants = {}
prev_const = {}
for cname, sh in cons_shape.items():
    constants[cname] = np.zeros((len(mod_names),) + sh[:])
    prev_const[cname] = np.zeros((len(mod_names),) + sh[:])

for i in range(len(mod_names)):
    for cname, cval in constants.items():
        cval[i] = data[mod_names[i]][cname]
        if mod_names[i] in old_cons.keys():
            prev_const[cname][i] = old_cons[mod_names[i]][cname]
        else:
            print(f"No previous {cname} found for {mod_names[i]}")
```
%% Cell type:code id: tags:
``` python
display(Markdown('## Processed modules'))
show_processed_modules(dinstance, constants, mod_names, mode="processed")
```
%% Cell type:markdown id: tags:
## Summary figures across Modules ##
The following plots give an overview of the calibration constants averaged across pixels and memory cells. A bad pixel mask is applied.
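A minimal, self-contained sketch of the masking idea used in this section (toy array shapes and values; the notebook itself operates on the loaded constants): flagged entries are set to NaN so that the nan-aware reductions ignore them.
``` python
import numpy as np

rng = np.random.default_rng(0)
offset = rng.normal(1000.0, 10.0, size=(64, 64, 100))  # toy (x, y, cells) constant
badpix = rng.integers(0, 5, size=offset.shape)         # nonzero entry == bad pixel

masked = offset.copy()
masked[badpix > 0] = np.nan                      # apply the bad pixel mask
per_cell_mean = np.nanmean(masked, axis=(0, 1))  # average over pixels, per cell
```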
%% Cell type:code id: tags:
``` python
if "LPD" in dinstance:
    geom = extra_geom.LPD_1MGeometry.from_quad_positions(
        quad_pos=[(11.4, 299), (-11.5, 8), (254.5, -16), (278.5, 275)])
    pixels_y = 256
    pixels_x = 256
elif dinstance in ('AGIPD1M1', 'AGIPD1M2'):
    geom = extra_geom.AGIPD_1MGeometry.from_quad_positions(
        quad_pos=[(-525, 625), (-550, -10), (520, -160), (542.5, 475)])
    pixels_y = 128
    pixels_x = 512
elif dinstance == "AGIPD500K":
    geom = extra_geom.AGIPD_500K2GGeometry.from_origin()
    pixels_y = 128
    pixels_x = 512
elif "DSSC" in dinstance:
    pixels_y = 512
    pixels_x = 128
    quadpos = [(-130, 5), (-130, -125), (5, -125), (5, 5)]
    extrageom_pth = os.path.dirname(extra_geom.__file__)
    geom = extra_geom.DSSC_1MGeometry.from_h5_file_and_quad_positions(
        f"{extrageom_pth}/tests/dssc_geo_june19.h5", positions=quadpos)

Mod_data = OrderedDict()
gainstages = 1
for const_name, const in constants.items():
    if const_name == 'BadPixelsDark':
        continue
    # Check if the constant has a gain dimension, e.g. AGIPD, LPD.
    if len(const.shape) == 5:
        gainstages = 3
    else:
        gainstages = 1
    for dname in ['{}', 'd-{}', 'dpct-{}']:
        Mod_data[dname.format(const_name)] = OrderedDict()
    display(Markdown(f'##### {const_name}'))
    print_once = True
    for gain in range(gainstages):
        if const_name == 'ThresholdsDark':
            if gain > 1:
                continue
            glabel = threshold_names
        else:
            glabel = gain_names
        for i in range(nmods):
            qm = module_index_to_qm(i)
            if qm in mod_names:
                m_idx = mod_names.index(qm)
                # Check if the constant has five indices, e.g. AGIPD, LPD.
                if len(const.shape) == 5:
                    values = np.nanmean(const[m_idx, :, :, :, gain], axis=2)
                    dval = np.nanmean(prev_const[const_name][m_idx, :, :, :, gain], axis=2)
                else:
                    values = np.nanmean(const[m_idx, :, :, :], axis=2)
                    dval = np.nanmean(prev_const[const_name][m_idx, :, :, :], axis=2)
                values[values == 0] = np.nan
                dval[dval == 0] = np.nan
                dval = values - dval
                dval_pct = dval / values * 100
                values = np.moveaxis(values, 0, -1).reshape(1, values.shape[1], values.shape[0])
                dval = np.moveaxis(dval, 0, -1).reshape(1, dval.shape[1], dval.shape[0])
                dval_pct = np.moveaxis(dval_pct, 0, -1).reshape(1, dval_pct.shape[1], dval_pct.shape[0])
            else:
                # If the module is not available, fill the arrays with NaN.
                values = np.zeros((1, pixels_x, pixels_y), dtype=np.float64)
                values[values == 0] = np.nan
                dval = values
                dval_pct = dval
            for k, v in {'{}': values, 'd-{}': dval, 'dpct-{}': dval_pct}.items():
                try:
                    Mod_data[k.format(const_name)][gain_names[gain]] = \
                        np.concatenate((Mod_data[k.format(const_name)][gain_names[gain]],
                                        v), axis=0)
                except KeyError:
                    # First module for this constant and gain stage.
                    Mod_data[k.format(const_name)][gain_names[gain]] = v
        if np.count_nonzero(dval) == 0 and print_once:
            display(Markdown(f'New and previous {const_name} are the same, hence there is no difference.'))
            print_once = False
        # Plot the constant across all modules.
        display(Markdown(f'###### {glabel[gain]} ######'))
        gs = gridspec.GridSpec(2, 2)
        fig = plt.figure(figsize=(24, 32))
        axis = {"ax0": {"cname": "{}", "gs": gs[0, :], "shrink": 0.9, "pad": 0.05, "label": "ADUs", "title": '{}'},
                "ax1": {"cname": "d-{}", "gs": gs[1, 0], "shrink": 0.6, "pad": 0.1, "label": "ADUs", "title": 'Difference with previous {}'},
                "ax2": {"cname": "dpct-{}", "gs": gs[1, 1], "shrink": 0.6, "pad": 0.1, "label": "%", "title": 'Difference with previous {} %'}}
        for ax, axv in axis.items():
            # Add the min and max plot values for each axis.
            vmin, vmax = get_range(Mod_data[axv["cname"].format(const_name)][gain_names[gain]], 2)
            ax = fig.add_subplot(axv["gs"])
            geom.plot_data_fast(Mod_data[axv["cname"].format(const_name)][gain_names[gain]],
                                vmin=vmin, vmax=vmax, ax=ax,
                                colorbar={'shrink': axv["shrink"],
                                          'pad': axv["pad"]})
            colorbar = ax.images[0].colorbar
            colorbar.set_label(axv["label"])
            ax.set_title(axv["title"].format(f"{const_name} {glabel[gain]}"), fontsize=15)
            ax.set_xlabel('Columns', fontsize=15)
            ax.set_ylabel('Rows', fontsize=15)
        plt.show()
```
%% Cell type:code id: tags:
``` python
# Loop over modules and constants
for const_name, const in constants.items():
    display(Markdown(f'### Summary across Modules - {const_name}'))
    for gain in range(gainstages):
        if const_name == 'ThresholdsDark':
            if gain == 2:
                continue
            glabel = threshold_names
        else:
            glabel = gain_names
        if len(const.shape) == 5:
            data = np.copy(const[:, :, :, :, gain])
        else:
            data = np.copy(const[:, :, :, :])
        if const_name != 'BadPixelsDark':
            if "BadPixelsDark" in constants.keys():
                label = f'{const_name} value [ADU], good pixels only'
                if len(const.shape) == 5:
                    data[constants['BadPixelsDark'][:, :, :, :, gain] > 0] = np.nan
                else:
                    data[constants['BadPixelsDark'][:, :, :, :] > 0] = np.nan
            else:
                label = f'{const_name} value [ADU], good and bad pixels'
            datamean = np.nanmean(data, axis=(1, 2))
            fig = plt.figure(figsize=(15, 6), tight_layout={
                'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})
            ax = fig.add_subplot(121)
        else:
            label = 'Fraction of bad pixels'
            data[data > 0] = 1.0
            datamean = np.nanmean(data, axis=(1, 2))
            datamean[datamean == 1.0] = np.nan
            fig = plt.figure(figsize=(15, 6), tight_layout={
                'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})
            ax = fig.add_subplot(111)
        d = []
        for im, mod in enumerate(datamean):
            d.append({'x': np.arange(mod.shape[0]),
                      'y': mod,
                      'drawstyle': 'steps-pre',
                      'label': mod_names[im],
                      })
        _ = simplePlot(d, figsize=(10, 10), xrange=(-12, 510),
                       x_label='Memory Cell ID',
                       y_label=label,
                       use_axis=ax,
                       title='{}'.format(glabel[gain]),
                       title_position=[0.5, 1.18],
                       legend='outside-top-ncol6-frame', legend_size='18%',
                       legend_pad=0.00)
        if const_name != 'BadPixelsDark':
            ax = fig.add_subplot(122)
            if "BadPixelsDark" in constants.keys():
                label = fr'$\sigma$ {const_name} [ADU], good pixels only'
            else:
                label = fr'$\sigma$ {const_name} [ADU], good and bad pixels'
            d = []
            for im, mod in enumerate(np.nanstd(data, axis=(1, 2))):
                d.append({'x': np.arange(mod.shape[0]),
                          'y': mod,
                          'drawstyle': 'steps-pre',
                          'label': mod_names[im],
                          })
            _ = simplePlot(d, figsize=(10, 10), xrange=(-12, 510),
                           x_label='Memory Cell ID',
                           y_label=label,
                           use_axis=ax,
                           title=r'{} $\sigma$'.format(glabel[gain]),
                           title_position=[0.5, 1.18],
                           legend='outside-top-ncol6-frame', legend_size='18%',
                           legend_pad=0.00)
        plt.show()
```
%% Cell type:markdown id: tags:
## Summary tables across Modules ##
The tables show values averaged across all pixels and memory cells of a given detector module.
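As a small sketch of how one table row below is assembled and rendered (toy data; the notebook uses the loaded constants): the per-module mean and standard deviation are formatted as LaTeX math, which is why the `$` and `\` escape rules are removed from tabulate in the next cell.
``` python
import numpy as np
import tabulate

toy = np.random.default_rng(1).normal(1000.0, 10.0, size=(64, 64, 100))
row = ['Q1M1', f'{np.nanmean(toy):6.1f} $\\pm$ {np.nanstd(toy):6.1f}']
print(tabulate.tabulate([row], tablefmt='latex', headers=['Module', 'High gain']))
```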
%% Cell type:code id: tags:
``` python
# Keep '$' and '\' unescaped so that LaTeX math such as $\pm$ in the
# table cells is rendered rather than escaped by tabulate.
if u'$' in tabulate.LATEX_ESCAPE_RULES:
    del tabulate.LATEX_ESCAPE_RULES[u'$']
if u'\\' in tabulate.LATEX_ESCAPE_RULES:
    del tabulate.LATEX_ESCAPE_RULES[u'\\']
```
%% Cell type:code id: tags:
``` python
head = ['Module', 'High gain', 'Medium gain', 'Low gain']
head_th = ['Module', 'HG_MG threshold', 'MG_LG threshold']
for const_name, const in constants.items():
    table = []
    for i_mod, mod in enumerate(mod_names):
        t_line = [mod]
        for gain in range(gainstages):
            if const_name == 'ThresholdsDark':
                if gain == 2:
                    continue
                header = head_th
            else:
                header = head
            if len(const.shape) == 5:
                data = np.copy(const[i_mod, :, :, :, gain])
            else:
                data = np.copy(const[i_mod, :, :, :])
            if const_name == 'BadPixelsDark':
                data[data > 0] = 1.0
                datasum = np.nansum(data)
                datamean = np.nanmean(data)
                if datamean == 1.0:
                    datamean = np.nan
                    datasum = np.nan
                t_line.append(f'{datasum:6.0f} ({datamean:6.3f}) ')
                label = '## Number(fraction) of bad pixels'
            else:
                if "BadPixelsDark" in constants.keys():
                    if len(const.shape) == 5:
                        data[constants['BadPixelsDark'][i_mod, :, :, :, gain] > 0] = np.nan
                    else:
                        data[constants['BadPixelsDark'][i_mod, :, :, :] > 0] = np.nan
                    label = f'## Average {const_name} [ADU], good pixels only'
                else:
                    label = f'## Average {const_name} [ADU], good and bad pixels'
                t_line.append(f'{np.nanmean(data):6.1f} $\\pm$ {np.nanstd(data):6.1f}')
        table.append(t_line)
    display(Markdown(label))
    md = display(Latex(tabulate.tabulate(
        table, tablefmt='latex', headers=header)))
```
@@ -51,7 +51,7 @@ install_requires = [
     "dill==0.3.0",
     "docutils==0.17.1",
     "dynaconf==3.1.4",
-    "extra_data==1.8.0",
+    "extra_data==1.9.1",
     "extra_geom==1.6.0",
     "gitpython==3.1.0",
     "h5py==3.5.0",
@@ -78,6 +78,7 @@ install_requires = [
     "pasha==0.1.0",
     "prettytable==0.7.2",
     "princess==0.5",
+    "psutil==5.9.0",
     "pypandoc==1.4",
     "python-dateutil==2.8.1",
     "pyyaml==5.3",