Commit 45592638 authored by Karim Ahmed

small refactors

parent 24d22c47
1 merge request: !551 add HIBEF AGIPD500K and fix some issue with retrieval of conditions
%% Cell type:markdown id: tags:

# Gain Characterization Summary #
%% Cell type:code id: tags:

``` python
in_folder = "" # in this notebook, in_folder is not used as the data source is in the destination folder
out_folder = "" # the folder to output to, required
hist_file_template = "hists_m{:02d}_sum.h5"
proc_folder = "" # Path to corrected image data used to create histograms and validation plots
raw_folder = "/gpfs/exfel/exp/MID/202030/p900137/raw" # folder of raw data, used to save information about the source data of the generated constants, required
run = 449 # run of image data used to create histograms
karabo_id = "MID_DET_AGIPD1M-1" # karabo id
receiver_id = "{}CH0" # inset for receiver devices
path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data
h5path = 'INSTRUMENT/{}/DET/{}:xtdf/' # path in the HDF5 file to images
h5path_idx = 'INDEX/{}/DET/{}:xtdf/' # path in the HDF5 file to image indices
h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP' # path to control information
karabo_id_control = "MID_IRU_AGIPD1M1" # karabo id for the control device
karabo_da_control = 'AGIPD1MCTRL00' # karabo DA for control information
use_dir_creation_date = True # use the creation date of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use
cal_db_timeout = 30000 # in milliseconds
local_output = True # output constants locally
db_output = False # output constants to database

# Fit parameters
peak_range = [-30, 30, 35, 65, 80, 130, 145, 200] # where to look for the peaks, [a0, b0, a1, b1, ...], exactly 8 elements
peak_width_range = [0, 30, 0, 35, 0, 40, 0, 45] # fit limits on the peak widths, [a0, b0, a1, b1, ...], exactly 8 elements

# Bad-pixel thresholds
d0_lim = [10, 70] # hard limits for the d0 value (distance between noise and first photon peak)
peak_width_lim = [0.97, 1.43, 1.03, 1.57] # hard limits on the peak widths, [a0, b0, a1, b1, ...], in units of the noise peak, 4 parameters
chi2_lim = [0, 3.0] # hard limit on the chi2/nDOF value
gain_lim = [0.80, 1.2] # relative threshold on the gain; contributes to the BadPixel bit "Gain_deviation"

cell_range = [1, 5] # range of cells to be considered, [0, 0] for all
pixel_range = [0, 0, 512, 128] # range of pixels x1, y1, x2, y2 to consider, [0, 0, 512, 128] for all
max_bins = 250 # maximum number of bins to consider
batch_size = [1, 8, 8] # batch size: [cell, x, y]
n_peaks_fit = 4 # number of Gaussian peaks to fit, including the noise peak
fix_peaks = True # fix the distance between photon peaks

# Detector conditions
max_cells = 0 # number of memory cells used, set to 0 to automatically infer
bias_voltage = 300 # bias voltage
acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine
gain_setting = 0.1 # the gain setting, use 0.1 to try to auto-determine
photon_energy = 8.05 # photon energy in keV
integration_time = -1 # integration time, negative values for auto-detection
```
%% Cell type:code id: tags:

``` python
import glob
import os
import re
import traceback
import warnings
from multiprocessing import Pool

import h5py
import matplotlib.pyplot as plt
import numpy as np
import tabulate
from cal_tools.agipdlib import (
    get_acq_rate,
    get_bias_voltage,
    get_gain_setting,
    get_integration_time,
    get_num_cells,
)
from cal_tools.agipdutils_ff import (
    any_in,
    fit_n_peaks,
    gaussian_sum,
    get_starting_parameters,
)
from cal_tools.ana_tools import get_range, save_dict_to_hdf5
from cal_tools.enums import BadPixels, BadPixelsFF
from cal_tools.tools import (
    get_dir_creation_date,
    get_pdu_from_db,
    get_report,
    module_index_to_qm,
    send_to_db,
)
from dateutil import parser
from extra_data import RunDirectory, stack_detector_data
from extra_geom import AGIPD_1MGeometry, AGIPD_500K2GGeometry
from iCalibrationDB import Conditions, Constants, Detectors
from iminuit import Minuit
from IPython.display import HTML, Latex, Markdown, display
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.simpleplot import simplePlot

%matplotlib inline
warnings.filterwarnings('ignore')
```
%% Cell type:code id: tags:

``` python
peak_range = np.reshape(peak_range, (4, 2))
```
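%% Cell type:markdown id: tags:

A quick illustration (not part of the original analysis): after the reshape, row `i` of `peak_range` holds the `[min, max]` search window of peak `i`, with peak 0 being the noise peak.

%% Cell type:code id: tags:

``` python
# Illustrative sketch only: print the per-peak search windows derived from
# the flat 8-element peak_range parameter above.
for i, (lo, hi) in enumerate(peak_range):
    print(f"peak {i}: search window [{lo}, {hi}] ADU")
```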
%% Cell type:code id: tags:

``` python
# Get operation conditions
filename = glob.glob(f"{raw_folder}/r{run:04d}/*-AGIPD[0-1][0-9]-*")[0]
channel = int(re.findall(r".*-AGIPD([0-9]+)-.*", filename)[0])
control_fname = f'{raw_folder}/r{run:04d}/RAW-R{run:04d}-{karabo_da_control}-S00000.h5'
h5path_ctrl = h5path_ctrl.format(karabo_id_control)

# Evaluate number of memory cells
mem_cells = get_num_cells(filename, karabo_id, channel)
if mem_cells is None:
    raise ValueError(f"No raw images found in {filename}")

# Evaluate acquisition rate
fast_paths = (filename, karabo_id, channel)
slow_paths = (control_fname, karabo_id_control)
if acq_rate == 0.:
    acq_rate = get_acq_rate(fast_paths, slow_paths)

# Evaluate creation time
creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(raw_folder, run)

# Evaluate gain setting
if gain_setting == 0.1:
    if creation_time.replace(tzinfo=None) < parser.parse('2020-01-31'):
        print("Setting gain-setting to None for runs taken before 2020-01-31")
        gain_setting = None
    else:
        try:
            gain_setting = get_gain_setting(control_fname, h5path_ctrl)
        except Exception as e:
            print(f'Error while reading gain setting from:\n{control_fname}')
            print(e)
            print("Setting gain setting to 0")
            gain_setting = 0

# Evaluate integration time
if integration_time < 0:
    integration_time = get_integration_time(control_fname, h5path_ctrl)

# Evaluate detector instance for mapping
instrument = karabo_id.split("_")[0]
if instrument == "HED":
    nmods = 8
else:
    nmods = 16

print(f"Using {creation_time} as creation time")
print(f"Operating conditions are:\n• Bias voltage: {bias_voltage}\n• Memory cells: {mem_cells}\n"
      f"• Acquisition rate: {acq_rate}\n• Gain setting: {gain_setting}\n• Integration time: {integration_time}\n"
      f"• Photon energy: {photon_energy}\n")
```
%% Cell type:code id: tags:

``` python
# Load constants for all modules
keys = ['g0mean', 'g1mean', 'gain', 'chi2_ndof', 'mask']
all_keys = set(keys)
for i in range(n_peaks_fit):
    all_keys.add(f'g{i}mean')
    all_keys.add(f'g{i}sigma')

fit_data = {}
labels = {'g0mean': 'Noise peak position [ADU]',
          'g1mean': 'First photon peak [ADU]',
          'gain': f"Gain [ADU/keV], $\gamma$={photon_energy} [keV]",
          'chi2_ndof': '$\chi^2$/nDOF',
          'mask': 'Fraction of bad pixels over cells'}

modules = []
karabo_da = []
for mod in range(nmods):
    qm = module_index_to_qm(mod)
    fit_data[mod] = {}
    try:
        hf = h5py.File(f'{out_folder}/fits_m{mod:02d}.h5', 'r')
        shape = hf['data/g0mean'].shape
        for key in keys:
            fit_data[mod][key] = hf[f'data/{key}'][()]
        print(f"{in_folder}/{hist_file_template.format(mod)}")
        modules.append(mod)
        karabo_da.append(f"AGIPD{mod:02d}")
    except Exception as e:
        err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
        print(f"No fit data available for module {qm}")
```
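%% Cell type:markdown id: tags:

For orientation (an illustrative check, assuming the file layout implied by the loading loop above): each per-module `fits_mXX.h5` file is expected to store one dataset per fit quantity under `data/`, e.g. `data/g0mean`, `data/gain`, `data/chi2_ndof`, `data/mask`.

%% Cell type:code id: tags:

``` python
# Illustrative sketch only: list the datasets available in the first
# successfully loaded per-module fit file.
if modules:
    with h5py.File(f'{out_folder}/fits_m{modules[0]:02d}.h5', 'r') as hf:
        print(sorted(hf['data'].keys()))
```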
%% Cell type:code id: tags:

``` python
# Calculate SlopesFF and BadPixels to be sent to the DB
bpmask = {}
slopesFF = {}

for mod in modules:
    bpmask[mod] = np.zeros(fit_data[mod]['mask'].shape).astype(np.int32)
    bpmask[mod][any_in(fit_data[mod]['mask'], BadPixelsFF.NO_ENTRY.value)] = BadPixels.FF_NO_ENTRIES.value
    bpmask[mod][any_in(fit_data[mod]['mask'],
                       BadPixelsFF.GAIN_DEVIATION.value)] |= BadPixels.FF_GAIN_DEVIATION.value
    bpmask[mod][any_in(fit_data[mod]['mask'],
                       BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
                       BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |
                       BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value)] |= BadPixels.FF_GAIN_EVAL_ERROR.value

    # Set the value for bad pixels to the average across pixels for a given module
    slopesFF[mod] = np.copy(fit_data[mod]['gain'])
    slopesFF[mod][fit_data[mod]['mask'] > 0] = np.nan
    gain_mean = np.nanmean(slopesFF[mod], axis=(1, 2))
    for i in range(slopesFF[mod].shape[0]):
        slopesFF[mod][i][fit_data[mod]['mask'][i] > 0] = gain_mean[i]
```
%% Cell type:code id: tags:

``` python
# Read the report path and create the file location tuple to add with the injection
proposal = list(filter(None, raw_folder.strip('/').split('/')))[-2]
file_loc = f'Proposal: {proposal}, Run: {run}'
report = get_report(out_folder)
```
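%% Cell type:markdown id: tags:

For reference (illustrative only): the proposal is taken as the second-to-last component of `raw_folder`, so the default path `/gpfs/exfel/exp/MID/202030/p900137/raw` yields `p900137`.

%% Cell type:code id: tags:

``` python
# Illustrative sketch only: with the default parameters above this prints
# "Proposal: p900137, Run: 449".
print(file_loc)
```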
%% Cell type:code id: tags:

``` python
# Set the operating condition
condition = Conditions.Illuminated.AGIPD(mem_cells, bias_voltage, 9.2,
                                         pixels_x=512, pixels_y=128, beam_energy=None,
                                         acquisition_rate=acq_rate, gain_setting=gain_setting,
                                         integration_time=integration_time)

# Modify the acceptable deviations for the integration-time condition if and
# only if the integration time is not the standard value (12).
if integration_time != 12:
    for p in condition.parameters:
        if p.name == 'Integration Time':
            p.lower_deviation = 5
            p.upper_deviation = 5

# Retrieve a list of all modules corresponding to the processed karabo_das
db_modules = get_pdu_from_db(karabo_id, karabo_da, Constants.AGIPD.SlopesFF(),
                             condition, cal_db_interface,
                             snapshot_at=creation_time)
```
%% Cell type:code id: tags:

``` python
# Send constants to the DB
def send_const(mod, pdu):
    try:
        # gain
        constant = Constants.AGIPD.SlopesFF()
        constant.data = np.moveaxis(np.moveaxis(slopesFF[mod], 0, 2), 0, 1)
        send_to_db(
            pdu, karabo_id, constant, condition, file_loc,
            report, cal_db_interface, creation_time,
            timeout=cal_db_timeout,
        )
        # bad pixels
        constant_bp = Constants.AGIPD.BadPixelsFF()
        constant_bp.data = np.moveaxis(np.moveaxis(bpmask[mod], 0, 2), 0, 1)
        send_to_db(
            pdu, karabo_id, constant_bp, condition, file_loc,
            report, cal_db_interface, creation_time,
            timeout=cal_db_timeout,
        )
    except Exception as e:
        err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
        when = None

# Check that the constants have the expected shape
if db_output:
    if slopesFF[modules[0]].shape == (mem_cells, 512, 128):
        with Pool(processes=len(modules)) as pool:
            const_out = pool.starmap(send_const, zip(modules, db_modules))
    else:
        print("Constants are not sent to the DB because of a shape mismatch")
        print(f"Expected {(mem_cells, 512, 128)}, observed {slopesFF[modules[0]].shape}")

condition_dict = {}
for entry in condition.to_dict()['parameters']:
    key = entry.pop('parameter_name')
    del entry['description']
    del entry['flg_available']
    condition_dict[key] = entry

# Create the same file structure as database constants files, in which
# each constant type has its corresponding condition and data.
if local_output:
    for mod, pdu in zip(modules, db_modules):
        qm = module_index_to_qm(mod)
        file = f"{out_folder}/slopesff_bpmask_module_{qm}.h5"
        dic = {
            pdu: {
                'SlopesFF': {
                    0: {
                        'condition': condition_dict,
                        'data': np.moveaxis(np.moveaxis(slopesFF[mod], 0, 2), 0, 1)}
                },
                'BadPixelsFF': {
                    0: {
                        'condition': condition_dict,
                        'data': np.moveaxis(np.moveaxis(bpmask[mod], 0, 2), 0, 1)}
                },
            }
        }
        save_dict_to_hdf5(dic, file)
```
%% Cell type:code id: tags:

``` python
# Define the AGIPD geometry
# TODO: find a better way to do this
if instrument == "HED":
    geom = AGIPD_500K2GGeometry.from_origin()
else:
    geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
        (-525, 625),
        (-550, -10),
        (520, -160),
        (542.5, 475),
    ])
```
%% Cell type:code id: tags:

``` python
# Create the arrays that will be used for figures.
# A dictionary contains all the data for each of the processing stages (gains, mean, slopesFF...).
# Each array corresponds to the data for all processed modules.
# These are updated with their fit/slopes data in the following loops.
if cell_range == [0, 0]:
    cell_range[1] = shape[0]

const_data = {}
for key in keys:
    const_data[key] = np.full((nmods, shape[0], 512, 128), np.nan)
    for i in range(nmods):
        if key in fit_data[i]:
            const_data[key][i, :, pixel_range[0]:pixel_range[2],
                            pixel_range[1]:pixel_range[3]] = fit_data[i][key]

const_data['slopesFF'] = np.full((nmods, shape[0], 512, 128), np.nan)
labels['slopesFF'] = f'slopesFF [ADU/keV], $\gamma$={photon_energy} [keV]'
for i in range(nmods):
    if i in slopesFF:
        const_data['slopesFF'][i, :, pixel_range[0]:pixel_range[2],
                               pixel_range[1]:pixel_range[3]] = slopesFF[i]
```
%% Cell type:markdown id: tags:

## Summary across pixels ##
%% Cell type:code id: tags:

``` python
for key in const_data.keys():
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    if key == 'mask':
        data = np.nanmean(const_data[key] > 0, axis=1)
        vmin, vmax = (0, 1)
    else:
        data = np.nanmean(const_data[key], axis=1)
        vmin, vmax = get_range(data, 5)
    ax = geom.plot_data_fast(data, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
    _ = ax.set_title(labels[key])
```
%% Cell type:markdown id: tags:

## Summary histograms ##
%% Cell type:code id: tags:

``` python
sel = (const_data['mask'] == 0)

module_mean = np.nanmean(const_data["gain"], axis=(1, 2, 3))
module_mean = module_mean[:, np.newaxis, np.newaxis, np.newaxis]

dsets = {'d01 [ADU]': const_data["g1mean"] - const_data["g0mean"],
         'gain [ADU/keV]': const_data["gain"],
         'gain relative to module mean': const_data["gain"] / module_mean,
         }

fig = plt.figure(figsize=(16, 5))
for i, (par, data) in enumerate(dsets.items()):
    ax = fig.add_subplot(1, 3, i + 1)
    plt_range = np.nanmin(data), np.nanmax(data)
    if 'd01' in par:
        ax.axvline(d0_lim[0])
        ax.axvline(d0_lim[1])
    elif 'rel' in par:
        ax.axvline(gain_lim[0])
        ax.axvline(gain_lim[1])
    num_bins = 100
    _ = ax.hist(data.flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='red',
                label='all fits',)
    a = ax.hist(data[sel].flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='g',
                label='good fits only',
                )
    ax.set_xlabel(f"{par}")
    ax.legend()
```
%% Cell type:markdown id: tags:

## Summary across cells ##
Good pixels only.
%% Cell type:code id: tags:

``` python
for key in const_data.keys():
    data = np.copy(const_data[key])
    if key == 'mask':
        data = data > 0
    else:
        data[const_data['mask'] > 0] = np.nan

    d = []
    for i in range(nmods):
        d.append({'x': np.arange(data[i].shape[0]),
                  'y': np.nanmean(data[i], axis=(1, 2)),
                  'drawstyle': 'steps-pre',
                  'label': f'{i}',
                  'linewidth': 2,
                  'linestyle': '--' if i > 7 else '-'
                  })

    fig = plt.figure(figsize=(15, 6))
    ax = fig.add_subplot(111)

    _ = simplePlot(d, xrange=(-12, 510),
                   x_label='Memory Cell ID',
                   y_label=labels[key],
                   use_axis=ax,
                   legend='top-left-frame-ncol8',)
    ylim = ax.get_ylim()
    ax.set_ylim(ylim[0], ylim[1] + np.abs(ylim[1] - ylim[0]) * 0.2)
    ax.grid()
```
%% Cell type:markdown id: tags:

## Summary table ##
%% Cell type:code id: tags:

``` python
table = []
for i in modules:
    table.append((i,
                  f"{np.nanmean(slopesFF[i]):0.1f} +- {np.nanstd(slopesFF[i]):0.2f}",
                  f"{np.nanmean(bpmask[i] > 0) * 100:0.1f} ({np.nansum(bpmask[i] > 0)})"
                  ))

all_SFF = np.array([list(sff) for sff in slopesFF.values()])
all_MSK = np.array([list(msk) for msk in bpmask.values()])

table.append(('overall',
              f"{np.nanmean(all_SFF):0.1f} +- {np.nanstd(all_SFF):0.2f}",
              f"{np.nanmean(all_MSK > 0) * 100:0.1f} ({np.nansum(all_MSK > 0)})"
              ))

md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
                                     headers=["Module", "Gain [ADU/keV]", "Bad pixels [% (count)]"])))
```
%% Cell type:markdown id: tags:

## Performance plots
%% Cell type:code id: tags:

``` python
def get_trains_data(run_folder, source, include, tid=None):
    """Load a single train for all modules.

    :param run_folder: path to the folder with the data
    :param source: data source to be loaded
    :param include: inset of the file name to be considered
    :param tid: train ID to be loaded; the first train is used if None is given
    """
    run_data = RunDirectory(run_folder, include)
    if tid:
        tid, data = run_data.select('*/DET/*', source).train_from_id(tid)
        return tid, stack_detector_data(data, source, modules=nmods)
    else:
        for tid, data in run_data.select('*/DET/*', source).trains(require_all=True):
            return tid, stack_detector_data(data, source, modules=nmods)
    return None, None

include = '*S00000*'
tid, orig = get_trains_data(f'{proc_folder}/r{run:04d}/', 'image.data', include)
orig = orig[cell_range[0]:cell_range[1], ...]
```
%% Cell type:code id: tags:

``` python
# FIXME: mask bad pixels from median
# mask = const_data['BadPixelsFF']

corrections = const_data['slopesFF']  # (nmods, shape[0], 512, 128), with shape[0] = cell_range[1] - cell_range[0]
corrections = np.moveaxis(corrections, 1, 0)  # (shape[0], nmods, 512, 128)
rel_corr = corrections / np.nanmedian(corrections)
corrected = orig / rel_corr
```
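%% Cell type:markdown id: tags:

A minimal toy example of the normalisation above (illustrative only, not part of the analysis): dividing by the median-normalised slopes flattens pixel-to-pixel gain variations while leaving the overall ADU scale unchanged.

%% Cell type:code id: tags:

``` python
# Illustrative sketch only: pixels with gain above the median are scaled
# down, pixels below are scaled up, and a pixel at the median is untouched.
toy_slopes = np.array([0.8, 1.0, 1.2])           # per-pixel gain [ADU/keV]
toy_rel = toy_slopes / np.nanmedian(toy_slopes)  # relative correction
toy_signal = np.array([8.0, 10.0, 12.0])         # raw signal [ADU]
print(toy_signal / toy_rel)                      # -> [10. 10. 10.]
```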
%% Cell type:markdown id: tags:

### Mean value not corrected (train 0)
%% Cell type:code id: tags:

``` python
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
odata = np.nanmean(orig, axis=0)
vmin, vmax = get_range(odata, 5)
ax = geom.plot_data_fast(odata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
_ = ax.set_title("Original data, mean across one train")
```
%% Cell type:markdown id: tags:

### Mean value corrected (train 0)
%% Cell type:code id: tags:

``` python
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
cdata = np.nanmean(corrected, axis=0)
# Reuse vmin/vmax from the uncorrected plot so both use the same colour scale
ax = geom.plot_data_fast(cdata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
_ = ax.set_title("Corrected data, mean across one train")
```
%% Cell type:markdown id: tags:

### Laplace transform of mean image
%% Cell type:code id: tags:

``` python
from scipy.ndimage import laplace

cmax = np.max(cdata)
omax = np.max(odata)
clap = np.zeros_like(cdata)
olap = np.zeros_like(odata)
for i in range(nmods):
    clap[i] = np.abs(laplace(cdata[i].astype(float) / cmax))
    olap[i] = np.abs(laplace(odata[i].astype(float) / omax))

fig = plt.figure(figsize=(20, 10))
vmin, vmax = get_range(olap, 2)

ax = fig.add_subplot(121)
ax = geom.plot_data_fast(olap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
_ = ax.set_title("Laplace (original data)")

ax = fig.add_subplot(122)
ax = geom.plot_data_fast(clap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
_ = ax.set_title("Laplace (gain corrected data)")
```
%% Cell type:markdown id: tags:

### Histogram of corrected and uncorrected spectrum (train 0)
%% Cell type:code id: tags:

``` python
######################################
# FIT PEAKS
######################################
x_range = [peak_range[0][0], peak_range[-1][-1]]
nb = x_range[1] - x_range[0] + 1

sel = ~np.isnan(corrected)

fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
y, xe, _ = ax.hist(corrected[sel].flatten(), bins=nb, range=x_range, label='corrected', alpha=0.5)
# get the bin centers from the bin edges
xc = xe[:-1] + (xe[1] - xe[0]) / 2

pars, _ = get_starting_parameters(xc, y, peak_range, 4)
minuit = fit_n_peaks(xc, y, pars, x_range, fix_d01=False, sigma_limit=1)
pc = minuit.args
resc = minuit.fitarg
yfc = gaussian_sum(xc, 4, *pc)
plt.plot(xc, yfc, label='corrected fit')

y, _, _ = ax.hist(orig[sel].flatten(), bins=nb, range=x_range, label='original', alpha=0.5)
pars, _ = get_starting_parameters(xc, y, peak_range, 4)
minuit = fit_n_peaks(xc, y, pars, x_range, fix_d01=False, sigma_limit=1)
po = minuit.args
reso = minuit.fitarg
yfo = gaussian_sum(xc, 4, *po)
plt.plot(xc, yfo, label='original fit')

plt.title("Signal spectrum, first train")
plt.xlabel('[ADU]')
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:

### Summary table ###
%% Cell type:code id: tags:

``` python
from scipy.stats import median_absolute_deviation as mad

table = []
headers = ["Parameter",
           "Value (original data)",
           "Value (gain corrected data)",
           "Relative difference"]

for i in range(4):
    table.append((f"Sigma{i} (ADU)",
                  f"{reso[f'g{i}sigma']:0.2f} ",
                  f"{resc[f'g{i}sigma']:0.2f} ",
                  f"{(reso[f'g{i}sigma'] - resc[f'g{i}sigma']) / reso[f'g{i}sigma']:0.2f} ",
                  ))

ovar = np.std(odata)
cvar = np.std(cdata)
table.append(("RMS of mean image",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

omin, omax = get_range(odata, 5)
cmin, cmax = get_range(cdata, 5)
ovar = np.std(odata[(odata > omin) & (odata < omax)])
cvar = np.std(cdata[(cdata > cmin) & (cdata < cmax)])
table.append(("RMS of mean image (mu +- 5 sigma)",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

ovar = mad(odata.flatten())
cvar = mad(cdata.flatten())
table.append(("MAD of mean image",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

ovar = np.median(olap)
cvar = np.median(clap)
table.append(("Median Laplace",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

md = display(Latex(tabulate.tabulate(table,
                                     tablefmt='latex',
                                     headers=headers)))
```