Commit 4f84d489 authored by Karim Ahmed

Merge branch 'fix/cleanup_FFNB_firstcell' into 'master'

[AGIPD][FF] Styling modification for both FF notebooks: 1st nb cell, and removing unneeded imports

See merge request detectors/pycalibration!736
parents 6514225c 0584b751
%% Cell type:markdown id: tags:
# Gain Characterization #
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/SPB/202030/p900138/scratch/karnem/r0203_r0204_v01/" # the folder to read histograms from, required
out_folder = "" # the folder to output to, required
hist_file_template = "hists_m{:02d}_sum.h5" # the template to use to access histograms
modules = [10] # modules to correct, set to -1 for all, range allowed
raw_folder = "/gpfs/exfel/exp/MID/202030/p900137/raw" # Path to raw image data used to create histograms
proc_folder = "" # Path to corrected image data used to create histograms
run = 449 # run number of the image data used to create histograms
karabo_id = "MID_DET_AGIPD1M-1" # karabo id of the detector
karabo_da = ['-1'] # a list of data aggregator names; default ['-1'] selects all data aggregators
ctrl_source_template = '{}/MDL/FPGA_COMP' # path to control information
karabo_id_control = "MID_IRU_AGIPD1M1" # karabo-id for control device
karabo_da_control = 'AGIPD1MCTRL00' # karabo DA for control information
use_dir_creation_date = True # use the creation date of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use
cal_db_timeout = 30000 # in milliseconds
local_output = True # output constants locally
db_output = False # output constants to database
# Fit parameters
peak_range = [-30, 30, 35, 70, 95, 135, 145, 220] # where to look for the peaks, [a0, b0, a1, b1, ...] exactly 8 elements
peak_width_range = [0, 30, 0, 35, 0, 40, 0, 45] # fit limits on the peak widths, [a0, b0, a1, b1, ...] exactly 8 elements
peak_norm_range = [0.0, -1, 0, -1, 0, -1, 0, -1] # fit limits on the peak norms, [a0, b0, a1, b1, ...]; -1 for no limit
# Bad-pixel thresholds (gain evaluation error). Contribute to BadPixel bit "Gain_Evaluation_Error"
peak_lim = [-30, 30] # limits on the position of the noise peak
d0_lim = [10, 80] # hard limits for the distance between noise and first peak
peak_width_lim = [0.9, 1.55, 0.95, 1.65] # hard limits on the peak widths for first and second peak, in units of the noise peak. 4 parameters.
chi2_lim = [0, 3.0] # hard limit on chi2/nDOF value
intensity_lim = 15 # threshold on the standard deviation of a histogram in ADU. Contributes to BadPixel bit "No_Entry"
gain_lim = [0.8, 1.2] # threshold on gain as a relative number. Contributes to BadPixel bit "Gain_deviation"
cell_range = [1, 3] # range of cells to be considered, [0,0] for all
pixel_range = [0, 0, 32, 32] # range of pixels x1,y1,x2,y2 to consider, [0,0,512,128] for all
max_bins = 0 # maximum number of bins to consider, 0 for all bins
batch_size = [1, 8, 8] # batch size: [cell,x,y]
fit_range = [0, 0] # range of a histogram considered for fitting in ADU. Dynamically evaluated in case [0,0]
n_peaks_fit = 4 # number of Gaussian peaks to fit, including the noise peak
fix_peaks = False # fix the distance between photon peaks
do_minos = False # additional Minuit feature to evaluate errors with MINOS
sigma_limit = 0. # if >0, repeat the fit keeping only bins within mu +- sigma_limit*sigma
# Detector conditions
# NOTE: The parameters below are needed for the summary notebook when running through xfel-calibrate.
mem_cells = -1 # number of memory cells used, negative values for auto-detection.
bias_voltage = 300 # bias voltage.
acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine.
gain_setting = -1 # the gain setting, negative values for auto-detection.
photon_energy = 8.05 # photon energy in keV.
integration_time = -1 # integration time, negative values for auto-detection.
```
%% Cell type:code id: tags:
``` python
import glob
import os
import traceback
import warnings
from multiprocessing import Pool
import h5py
import matplotlib.pyplot as plt
import numpy as np
import sharedmem
import XFELDetAna.xfelpyanatools as xana
from cal_tools.agipdutils_ff import (
    BadPixelsFF,
    any_in,
    fit_n_peaks,
    gaussian,
    gaussian_sum,
    get_mask,
    get_starting_parameters,
    set_par_limits,
)
from cal_tools.ana_tools import get_range, save_dict_to_hdf5
from iminuit import Minuit
from XFELDetAna.plotting.heatmap import heatmapPlot
from XFELDetAna.plotting.simpleplot import simplePlot
# %load_ext autotime
%matplotlib inline
warnings.filterwarnings('ignore')
```
%% Cell type:code id: tags:
``` python
peak_range = np.reshape(peak_range,(4,2))
peak_width_range = np.reshape(peak_width_range,(4,2))
peak_width_lim = np.reshape(peak_width_lim,(2,2))
peak_norm_range = [None if x == -1 else x for x in peak_norm_range]
peak_norm_range = np.reshape(peak_norm_range,(4,2))
module = modules[0]
```
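%% Cell type:markdown id: tags:
To make the flat-list convention concrete: after the reshape above, row `i` of `peak_range` is the `(low, high)` search window for peak `i`. A minimal sketch using the default values from the first cell (`peak_range_demo` is a name introduced only for this illustration):
%% Cell type:code id: tags:
``` python
import numpy as np

# The flat 8-element list [a0, b0, a1, b1, ...] becomes one (low, high) row per peak
peak_range_demo = np.reshape([-30, 30, 35, 70, 95, 135, 145, 220], (4, 2))
for i, (lo, hi) in enumerate(peak_range_demo):
    print(f"peak {i}: search window [{lo}, {hi}] ADU")
```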
%% Cell type:code id: tags:
``` python
def idx_gen(batch_start, batch_size):
    """
    Iterate over memory cells and pixels, starting from batch_start
    up to batch_start + batch_size in each dimension.
    """
    for c_idx in range(batch_start[0], batch_start[0]+batch_size[0]):
        for x_idx in range(batch_start[1], batch_start[1]+batch_size[1]):
            for y_idx in range(batch_start[2], batch_start[2]+batch_size[2]):
                yield c_idx, x_idx, y_idx
```
%% Cell type:code id: tags:
``` python
n_pixels_x = pixel_range[2]-pixel_range[0]
n_pixels_y = pixel_range[3]-pixel_range[1]
hist_data = {}
with h5py.File(f"{in_folder}/{hist_file_template.format(module)}", 'r') as hf:
    hist_data['cellId'] = np.array(hf['cellId'][()])
    hist_data['hRange'] = np.array(hf['hRange'][()])
    hist_data['nBins'] = np.array(hf['nBins'][()])
    if cell_range == [0,0]:
        cell_range[1] = hist_data['cellId'].shape[0]
    if max_bins == 0:
        max_bins = hist_data['nBins']
    hist_data['cellId'] = hist_data['cellId'][cell_range[0]:cell_range[1]]
    hist_data['hist'] = np.array(hf['hist'][cell_range[0]:cell_range[1], :max_bins, :])
n_cells = cell_range[1]-cell_range[0]
hist_data['hist'] = hist_data['hist'].reshape(n_cells, max_bins, 512, 128)
hist_data['hist'] = hist_data['hist'][:,:,pixel_range[0]:pixel_range[2],pixel_range[1]:pixel_range[3]]
print(f'Data shape {hist_data["hist"].shape}')
bin_edges = np.linspace(hist_data['hRange'][0], hist_data['hRange'][1], int(hist_data['nBins']+1))
x = (bin_edges[1:] + bin_edges[:-1])[:max_bins] * 0.5
batches = []
for c_idx in range(0, n_cells, batch_size[0]):
    for x_idx in range(0, n_pixels_x, batch_size[1]):
        for y_idx in range(0, n_pixels_y, batch_size[2]):
            batches.append([c_idx,x_idx,y_idx])
print(f'Number of batches {len(batches)}')
```
%% Cell type:code id: tags:
``` python
def fit_batch(batch_start):
    current_result = {}
    prev = None
    for c_idx, x_idx, y_idx in idx_gen(batch_start, batch_size):
        try:
            y = hist_data['hist'][c_idx, :, x_idx, y_idx]
            if prev is None:
                prev, _ = get_starting_parameters(x, y, peak_range, n_peaks=n_peaks_fit)
            if fit_range == [0, 0]:
                frange = (prev[f'g0mean']-2*prev[f'g0sigma'],
                          prev[f'g{n_peaks_fit-1}mean'] + prev[f'g{n_peaks_fit-1}sigma'])
            else:
                frange = fit_range
            set_par_limits(prev, peak_range, peak_norm_range,
                           peak_width_range, n_peaks_fit)
            minuit = fit_n_peaks(x, y, prev, frange,
                                 do_minos=do_minos, n_peaks=n_peaks_fit,
                                 fix_d01=fix_peaks, sigma_limit=sigma_limit,)
            ndof = np.rint(frange[1]-frange[0])-len(minuit.args)  ## FIXME: this line is wrong if fix_peaks is True
            current_result['chi2_ndof'] = minuit.fval/ndof
            res = minuit.fitarg
            if fix_peaks:  ## set g2 and g3 mean correctly
                for i in range(2, n_peaks_fit):
                    d = res[f'g1mean'] - res[f'g0mean']
                    res[f'g{i}mean'] = res[f'g0mean'] + d*i
            current_result.update(res)
            current_result.update(minuit.get_fmin())
            fit_result['chi2_ndof'][c_idx, x_idx, y_idx] = current_result['chi2_ndof']
            for key in res.keys():
                if key in fit_result:
                    fit_result[key][c_idx, x_idx, y_idx] = res[key]
            fit_result['mask'][c_idx, x_idx, y_idx] = get_mask(current_result,
                                                               peak_lim,
                                                               d0_lim, chi2_lim,
                                                               peak_width_lim)
        except Exception as e:
            fit_result['mask'][c_idx, x_idx, y_idx] = BadPixelsFF.FIT_FAILED.value
            print(c_idx, x_idx, y_idx, e, traceback.format_exc())
        if fit_result['mask'][c_idx, x_idx, y_idx] == 0:
            prev = res
        else:
            prev = None
```
%% Cell type:markdown id: tags:
## Single fit ##
The left plot shows the starting parameters for the fit; the right plot shows the fit result. Errors are evaluated with MINOS.
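%% Cell type:markdown id: tags:
For orientation, a minimal, self-contained sketch of how MINOS errors are obtained with iminuit (the 1.x API, matching the `get_fmin`/`get_param_states` calls used below); the single-Gaussian model and toy data are illustrative only, not this notebook's multi-peak fit:
%% Cell type:code id: tags:
``` python
import numpy as np
from iminuit import Minuit

# Toy data: one Gaussian peak plus small noise (illustrative only)
xs = np.linspace(-5., 5., 101)
rng = np.random.RandomState(0)
ys = 100. * np.exp(-0.5 * xs**2) + rng.normal(0., 1., xs.size)

def cost(n, mean, sigma):
    # Least-squares cost; errordef=1 is the correct setting for this definition
    model = n * np.exp(-0.5 * ((xs - mean) / sigma)**2)
    return np.sum((ys - model)**2)

m = Minuit(cost, n=90., mean=0.1, sigma=1.2, errordef=1, pedantic=False)
m.migrad()   # find the minimum
m.minos()    # asymmetric confidence intervals, as used for the single-pixel fit below
print(m.get_fmin())
print(m.get_param_states())
```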
%% Cell type:code id: tags:
``` python
hist = hist_data['hist'][1, :, 1, 1]
prev, shapes = get_starting_parameters(x, hist, peak_range, n_peaks=n_peaks_fit)
if fit_range == [0, 0]:
    frange = (prev[f'g0mean']-2*prev[f'g0sigma'],
              prev[f'g3mean'] + prev[f'g3sigma'])
else:
    frange = fit_range
set_par_limits(prev, peak_range, peak_norm_range,
               peak_width_range, n_peaks=n_peaks_fit)
minuit = fit_n_peaks(x, hist, prev, frange,
                     do_minos=True, n_peaks=n_peaks_fit,
                     fix_d01=fix_peaks,
                     sigma_limit=sigma_limit,
                     )
print(minuit.get_fmin())
minuit.print_matrix()
print(minuit.get_param_states())
```
%% Cell type:code id: tags:
``` python
res = minuit.fitarg
if fix_peaks:
    for i in range(2, n_peaks_fit):
        d = res[f'g1mean'] - res[f'g0mean']
        res[f'g{i}mean'] = res[f'g0mean'] + d*i
err = minuit.errors
p = minuit.args
ya = np.arange(0, 1e4)
y = gaussian_sum(x, n_peaks_fit, *p)
peak_colors = ['g', 'y', 'b', 'orange']
peak_hist = hist.copy()
d = []
if sigma_limit > 0:
    sel2 = (np.abs(x - res['g0mean']) < sigma_limit*res['g0sigma']) | \
           (np.abs(x - res['g1mean']) < sigma_limit*res['g1sigma']) | \
           (np.abs(x - res['g2mean']) < sigma_limit*res['g2sigma']) | \
           (np.abs(x - res['g3mean']) < sigma_limit*res['g3sigma'])
    peak_hist[~sel2] = 0
    valley_hist = hist.copy()
    valley_hist[sel2] = 0
    d.append({'x': x,
              'y': valley_hist.astype(np.float64),
              'y_err': np.sqrt(valley_hist),
              'drawstyle': 'bars',
              'errorstyle': 'bars',
              'transparency': '95%',
              'errorcoarsing': 3,
              'label': 'X-ray Data'
              })
    htitle = f'X-ray Data, (μ±{sigma_limit:0.1f}σ)'
else:
    htitle = 'X-ray Data'
d.append({'x': x,
          'y': peak_hist.astype(np.float64),
          'y_err': np.sqrt(peak_hist),
          'drawstyle': 'bars',
          'errorstyle': 'bars',
          'errorcoarsing': 3,
          'label': htitle,
          }
         )
d.append({'x': x,
          'y': y,
          'y2': (hist-y)/np.sqrt(hist),
          'drawstyle': 'line',
          'drawstyle2': 'steps-mid',
          'label': 'Fit'
          }
         )
for i in range(n_peaks_fit):
    d.append({'x': x,
              'y': gaussian(x, res[f'g{i}n'], res[f'g{i}mean'], res[f'g{i}sigma']),
              'drawstyle': 'line',
              'color': peak_colors[i],
              })
    d.append({'x': np.full_like(ya, res[f'g{i}mean']),
              'y': ya,
              'drawstyle': 'line',
              'linestyle': 'dashed',
              'color': peak_colors[i],
              'label': f'peak {i} = {res[f"g{i}mean"]:0.1f} $ \pm $ {err[f"g{i}mean"]:0.2f} ADU'})
```
%% Cell type:code id: tags:
``` python
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(16, 7)
for i, shape in enumerate(shapes):
    idx = shape[3]
    ax1.errorbar(
        x[idx], hist[idx],
        np.sqrt(hist[idx]),
        marker='+', ls='',
    )
    yg = gaussian(x[idx], *shape[:3])
    l = f'Peak {i}: {shape[1]:0.1f} $ \pm $ {shape[2]:0.2f} ADU'
    ax1.plot(x[idx], yg, label=l)
ax1.grid(True)
ax1.set_xlabel("Signal [ADU]")
ax1.set_ylabel("Counts")
ax1.legend(ncol=2)
_ = xana.simplePlot(
    d,
    use_axis=ax2,
    x_label='Signal [ADU]',
    y_label='Counts',
    secondpanel=True, y_log=False,
    x_range=(frange[0], frange[1]),
    y_range=(1., np.max(hist)*1.6),
    legend='top-left-frame-ncol2',
)
plt.show()
```
%% Cell type:markdown id: tags:
## All fits ##
%% Cell type:code id: tags:
``` python
# Allocate memory for fit results
fit_result = {}
keys = list(minuit.fitarg.keys())
keys = [x for x in keys if 'limit_' not in x and 'fix_' not in x]
keys += ['chi2_ndof', 'mask', 'gain']
for key in keys:
    dtype = 'f4'
    if key == 'mask':
        dtype = 'i4'
    fit_result[key] = sharedmem.empty([n_cells, n_pixels_x, n_pixels_y], dtype=dtype)
```
%% Cell type:code id: tags:
``` python
# Perform fitting
with Pool() as pool:
    const_out = pool.map(fit_batch, batches)
```
%% Cell type:code id: tags:
``` python
# Evaluate bad pixels
fit_result['gain'] = (fit_result['g1mean'] - fit_result['g0mean'])/photon_energy
# Calculate histogram width and evaluate cut
h_sums = np.sum(hist_data['hist'], axis=1)
hist_norm = hist_data['hist'] / h_sums[:, None, :, :]
hist_mean = np.sum(hist_norm[:, :max_bins, ...] *
                   x[None, :, None, None], axis=1)
hist_sqr = (x[None, :, None, None] - hist_mean[:, None, ...])**2
hist_std = np.sqrt(np.sum(hist_norm[:, :max_bins, ...] * hist_sqr, axis=1))
fit_result['mask'][hist_std<intensity_lim] |= BadPixelsFF.NO_ENTRY.value
# Bad pixel on gain deviation
gains = np.copy(fit_result['gain'])
gains[fit_result['mask']>0] = np.nan
gain_mean = np.nanmean(gains, axis=(1,2))
fit_result['mask'][fit_result['gain'] > gain_mean[:,None,None]*gain_lim[1]] |= BadPixelsFF.GAIN_DEVIATION.value
fit_result['mask'][fit_result['gain'] < gain_mean[:,None,None]*gain_lim[0]] |= BadPixelsFF.GAIN_DEVIATION.value
```
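%% Cell type:markdown id: tags:
The width cut above uses the weighted standard deviation of each histogram, with bin contents as weights and bin centres as values. A one-histogram sketch of the same arithmetic, with toy numbers introduced only for this illustration:
%% Cell type:code id: tags:
``` python
import numpy as np

counts = np.array([0., 2., 10., 4., 1.])  # histogram bin contents (one pixel, one cell)
centres = np.array([0., 1., 2., 3., 4.])  # bin centres, playing the role of x above
p = counts / counts.sum()                 # normalised histogram, as in hist_norm
mean = np.sum(p * centres)                # weighted mean, as in hist_mean
std = np.sqrt(np.sum(p * (centres - mean)**2))  # weighted std, as in hist_std
print(mean, std)  # histograms with std below intensity_lim are flagged NO_ENTRY
```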
%% Cell type:code id: tags:
``` python
# Save fit results
os.makedirs(out_folder, exist_ok=True)
out_name = f'{out_folder}/fits_m{module:02d}.h5'
print(f'Save to file: {out_name}')
save_dict_to_hdf5({'data': fit_result}, out_name)
```
%% Cell type:markdown id: tags:
## Summary across cells ##
%% Cell type:code id: tags:
``` python
labels = [
    "Noise peak [ADU]",
    "First photon peak [ADU]",
    f"gain [ADU/keV] $\gamma$={photon_energy} [keV]",
    "$\chi^2$/nDOF",
    "Fraction of bad pixels",
]
for i, key in enumerate(['g0mean', 'g1mean', 'gain', 'chi2_ndof', 'mask']):
    fig = plt.figure(figsize=(20,5))
    ax = fig.add_subplot(121)
    data = fit_result[key]
    if key == 'mask':
        data = data > 0
        vmin, vmax = [0, 1]
    else:
        vmin, vmax = get_range(data, 5)
    _ = heatmapPlot(
        np.mean(data, axis=0).T,
        add_panels=False, cmap='viridis', use_axis=ax,
        vmin=vmin, vmax=vmax, lut_label=labels[i]
    )
    if key != 'mask':
        vmin, vmax = get_range(data, 7)
    ax = fig.add_subplot(122)
    _ = xana.histPlot(
        ax, data.flatten(),
        bins=45, range=[vmin, vmax],
        log=True, color='red', histtype='stepfilled'
    )
    ax.set_xlabel(labels[i])
    ax.set_ylabel("Counts")
```
%% Cell type:markdown id: tags:
## Histograms of fit parameters ##
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
a = ax.hist(hist_std.flatten(), bins=100, range=(0,100))
ax.plot([intensity_lim, intensity_lim], [0, np.nanmax(a[0])], linewidth=1.5, color='red')
ax.set_xlabel('Histogram width [ADU]', fontsize=14)
ax.set_ylabel('Number of histograms', fontsize=14)
ax.set_title(f'{hist_std[hist_std<intensity_lim].shape[0]} histograms below the threshold of {intensity_lim} ADU',
             fontsize=14, fontweight='bold')
ax.grid()
ax.set_yscale('log')
```
%% Cell type:code id: tags:
``` python
def plot_par_distr(par):
    fig = plt.figure(figsize=(16, 5))
    sel = fit_result['mask'] == 0
    for i in range(n_peaks_fit):
        data = fit_result[f"g{i}{par}"]
        plt_range = (-1, 50)
        if par == 'mean':
            plt_range = [peak_range[i][0], peak_range[i][1]]
        num_bins = int(plt_range[1] - plt_range[0])
        ax = fig.add_subplot(1, n_peaks_fit, i+1)
        _ = xana.histPlot(ax, data.flatten(),
                          bins=num_bins, range=plt_range,
                          log=True, color='red',
                          label='all fits',)
        a = ax.hist(data[sel].flatten(),
                    bins=num_bins, range=plt_range,
                    log=True, color='g',
                    label='good fits only',
                    )
        ax.set_xlabel(f"g{i} {par} [ADU]")
        ax.legend()
plot_par_distr('mean')
plot_par_distr('sigma')
```
%% Cell type:code id: tags:
``` python
sel = fit_result['mask'] == 0
dsets = {'d01 [ADU]': fit_result[f"g1mean"]-fit_result[f"g0mean"],
         'gain [ADU/keV]': fit_result[f"gain"],
         'gain relative to module mean': fit_result[f"gain"]/np.nanmean(gain_mean),
         }
fig = plt.figure(figsize=(16,5))
for i, (par, data) in enumerate(dsets.items()):
    ax = fig.add_subplot(1, 3, i+1)
    plt_range = get_range(data, 10)
    num_bins = 100
    _ = xana.histPlot(ax, data.flatten(),
                      bins=num_bins, range=plt_range,
                      log=True, color='red',
                      label='all fits',)
    a = ax.hist(data[sel].flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='g',
                label='good fits only',
                )
    ax.set_xlabel(f"{par}")
    ax.legend()
    if 'd01' in par:
        ax.axvline(d0_lim[0])
        ax.axvline(d0_lim[1])
    if 'rel' in par:
        ax.axvline(gain_lim[0])
        ax.axvline(gain_lim[1])
```
%% Cell type:markdown id: tags:
## Summary across pixels ##
Mean and median values are calculated across all pixels for each memory cell.
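As a quick reference for the statistics computed below, here is a self-contained numpy sketch of the per-cell mean, median, and median absolute deviation (MAD, a robust spread estimate); the array shape is illustrative, not the detector's:
%% Cell type:code id: tags:
``` python
import numpy as np

# (cells, pixels_x, pixels_y), a toy shape for illustration
data = np.random.RandomState(1).normal(50.0, 2.0, (3, 8, 8))
mean = np.nanmean(data, axis=(1, 2))      # per-cell mean across pixels
median = np.nanmedian(data, axis=(1, 2))  # per-cell median across pixels
mad = np.nanmedian(np.abs(data - median[:, None, None]), axis=(1, 2))  # robust spread
print(mean, median, mad)
```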
%% Cell type:code id: tags:
``` python
def plot_error_band(key, x, ax):
    cdata = np.copy(fit_result[key])
    cdata[fit_result['mask']>0] = np.nan
    mean = np.nanmean(cdata, axis=(1,2))
    median = np.nanmedian(cdata, axis=(1,2))
    std = np.nanstd(cdata, axis=(1,2))
    mad = np.nanmedian(np.abs(cdata - median[:,None,None]), axis=(1,2))
    ax.plot(x, mean, 'k', color='#3F7F4C', label=" mean value ")
    ax.plot(x, median, 'o', color='red', label=" median value ")
    ax.fill_between(x, mean-std, mean+std,
                    alpha=0.6, edgecolor='#3F7F4C', facecolor='#7EFF99',
                    linewidth=1, linestyle='dashdot', antialiased=True,
                    label=" mean value $ \pm $ std ")
    ax.fill_between(x, median-mad, median+mad,
                    alpha=0.3, edgecolor='red', facecolor='red',
                    linewidth=1, linestyle='dashdot', antialiased=True,
                    label=" median value $ \pm $ mad ")
    if f'error_{key}' in fit_result:
        cerr = np.copy(fit_result[f'error_{key}'])
        cerr[fit_result['mask']>0] = np.nan
        meanerr = np.nanmean(cerr, axis=(1,2))
        ax.fill_between(x, mean-meanerr, mean+meanerr,
                        alpha=0.6, edgecolor='#089FFF', facecolor='#089FFF',
                        linewidth=1, linestyle='dashdot', antialiased=True,
                        label=" mean fit error ")
x = np.linspace(*cell_range, n_cells)
for i, key in enumerate(['g0mean', 'g1mean', 'gain', 'chi2_ndof']):
    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(111)
    plot_error_band(key, x, ax)
    ax.set_xlabel('Memory Cell ID', fontsize=14)
    ax.set_ylabel(labels[i], fontsize=14)
    ax.grid()
    ax.legend()
```
%% Cell type:markdown id: tags:
## Cut flow ##
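For intuition, a small self-contained sketch of the cumulative cut-flow logic used below: the k-th 'Cut flow' bar ORs together the first k bad-pixel bits, while 'Only this cut' tests each bit alone. `any_in` from cal_tools is emulated here with a plain bitwise test (`any_in_demo` and the bit values are assumptions made only for this illustration):
%% Cell type:code id: tags:
``` python
import numpy as np

# Illustrative bit values, not the real BadPixelsFF enum
FIT_FAILED, ACCURATE_COVAR, CHI2_THRESHOLD = 1, 2, 4

mask = np.array([0, FIT_FAILED, CHI2_THRESHOLD, ACCURATE_COVAR | CHI2_THRESHOLD])

def any_in_demo(m, bits):
    # True where the mask shares at least one of the given bits
    return (m & bits) != 0

cuts = [FIT_FAILED, ACCURATE_COVAR, CHI2_THRESHOLD]
acc = 0
for k, bit in enumerate(cuts):
    acc |= bit
    frac_only = 100 * (1 - any_in_demo(mask, bit).mean())  # 'Only this cut'
    frac_flow = 100 * (1 - any_in_demo(mask, acc).mean())  # cumulative 'Cut flow'
    print(f"cut {k}: only {frac_only:.0f}%, flow {frac_flow:.0f}%")
```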
%% Cell type:code id: tags:
``` python
fig, ax = plt.subplots()
fig.set_size_inches(10, 5)
n_bars = 8
x = np.arange(n_bars)
width = 0.3
msk = fit_result['mask']
n_fits = np.prod(msk.shape)
y = [any_in(msk, BadPixelsFF.FIT_FAILED.value),
     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value),
     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
            BadPixelsFF.CHI2_THRESHOLD.value),
     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
            BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value),
     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
            BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |
            BadPixelsFF.NOISE_PEAK_THRESHOLD.value),
     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
            BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |
            BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value),
     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
            BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |
            BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value
            | BadPixelsFF.NO_ENTRY.value),
     any_in(msk, BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
            BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |
            BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value
            | BadPixelsFF.NO_ENTRY.value | BadPixelsFF.GAIN_DEVIATION.value)
     ]
y2 = [any_in(msk, BadPixelsFF.FIT_FAILED.value),
      any_in(msk, BadPixelsFF.ACCURATE_COVAR.value),
      any_in(msk, BadPixelsFF.CHI2_THRESHOLD.value),
      any_in(msk, BadPixelsFF.GAIN_THRESHOLD.value),
      any_in(msk, BadPixelsFF.NOISE_PEAK_THRESHOLD.value),
      any_in(msk, BadPixelsFF.PEAK_WIDTH_THRESHOLD.value),
      any_in(msk, BadPixelsFF.NO_ENTRY.value),
      any_in(msk, BadPixelsFF.GAIN_DEVIATION.value)
      ]
y = (1 - np.sum(y, axis=(1,2,3))/n_fits)*100
y2 = (1 - np.sum(y2, axis=(1,2,3))/n_fits)*100
labels = ['Fit fails',
          'Accurate covar',
          'Chi2/nDOF',
          'Gain',
          'Noise peak',
          'Peak width',
          'No Entry',
          'Gain deviation']
ax.bar(x, y2, width, label='Only this cut')
ax.bar(x, y, width, label='Cut flow')
ax.set_xticks(x)
ax.set_xticklabels(labels, rotation=90)
ax.set_ylim(y[5]-0.5, 100)
ax.grid(True)
ax.legend()
plt.show()
```
...
%% Cell type:markdown id: tags:
# Gain Characterization Summary #
%% Cell type:code id: tags:
``` python
in_folder = "" # in this notebook, in_folder is not used as the data source is in the destination folder
out_folder = "" # the folder to output to, required
metadata_folder = "" # Directory containing calibration_metadata.yml when run by xfel-calibrate
hist_file_template = "hists_m{:02d}_sum.h5"
proc_folder = "" # Path to corrected image data used to create histograms and validation plots
raw_folder = "/gpfs/exfel/exp/MID/202030/p900137/raw" # folder of raw data. This is used to save information about the source data of the generated constants, required
run = 449 # run of image data used to create histograms
karabo_id = "MID_DET_AGIPD1M-1" # karabo id of the detector
ctrl_source_template = '{}/MDL/FPGA_COMP' # path to control information
karabo_id_control = "MID_EXP_AGIPD1M1" # karabo-id for control device
use_dir_creation_date = True # use the creation date of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use
cal_db_timeout = 30000 # in milliseconds
local_output = True # output constants locally
db_output = False # output constants to database
# Fit parameters
peak_range = [-30,30,35,65,80,130,145,200] # where to look for the peaks, [a0, b0, a1, b1, ...] exactly 8 elements
# Bad-pixel thresholds
d0_lim = [10, 70] # hard limits for d0 value (distance between noise and first peak)
gain_lim = [0.80, 1.2] # threshold on gain as a relative number. Contributes to BadPixel bit "Gain_deviation"
cell_range = [1,5] # range of cells to be considered, [0,0] for all
pixel_range = [0,0,512,128] # range of pixels x1,y1,x2,y2 to consider, [0,0,512,128] for all
n_peaks_fit = 4 # number of Gaussian peaks to fit, including the noise peak
# Detector conditions
mem_cells = -1 # number of memory cells used, negative values for auto-detection.
bias_voltage = 0. # Bias voltage
acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine
gain_setting = -1 # the gain setting, negative values for auto-detection.
photon_energy = 8.05 # photon energy in keV
integration_time = -1 # integration time, negative values for auto-detection.
```
%% Cell type:code id: tags:
``` python
import os
import re
import traceback
import warnings
from multiprocessing import Pool
import h5py
import matplotlib.pyplot as plt
import numpy as np
import tabulate
from cal_tools.agipdlib import AgipdCtrl
from cal_tools.agipdutils_ff import (
    BadPixelsFF,
    any_in,
    fit_n_peaks,
    gaussian_sum,
    get_starting_parameters,
)
from cal_tools.ana_tools import get_range, save_dict_to_hdf5
from cal_tools.enums import BadPixels
from cal_tools.tools import (
    get_dir_creation_date,
    get_pdu_from_db,
    get_report,
    module_index_to_qm,
    send_to_db
)
from extra_data import RunDirectory, stack_detector_data
from extra_geom import AGIPD_1MGeometry, AGIPD_500K2GGeometry
from iCalibrationDB import Conditions, Constants
from iminuit import Minuit
from IPython.display import Latex, display
from XFELDetAna.plotting.simpleplot import simplePlot
%matplotlib inline
warnings.filterwarnings('ignore')
```
%% Cell type:code id: tags:
``` python
peak_range = np.reshape(peak_range,(4,2))
```
%% Cell type:code id: tags:
``` python
# Get operation conditions
ctrl_source = ctrl_source_template.format(karabo_id_control)
run_folder = f'{raw_folder}/r{run:04d}/'
raw_dc = RunDirectory(run_folder)
# Read operating conditions from AGIPD00 files
instrument_src_mod = [
    s for s in list(raw_dc.all_sources) if "0CH" in s][0]
ctrl_src = [
    s for s in list(raw_dc.all_sources) if ctrl_source in s][0]
# Evaluate creation time
creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(raw_folder, run)
agipd_cond = AgipdCtrl(
    run_dc=raw_dc,
    image_src=instrument_src_mod,
    ctrl_src=ctrl_src,
    raise_error=False,  # to be able to process very old data without gain_setting value
)
if mem_cells < 0:
    mem_cells = agipd_cond.get_num_cells()
if mem_cells is None:
    raise ValueError(f"No raw images found in {run_folder}")
if acq_rate == 0.:
    acq_rate = agipd_cond.get_acq_rate()
if gain_setting < 0:
    gain_setting = agipd_cond.get_gain_setting(creation_time)
if bias_voltage == 0.:
    bias_voltage = agipd_cond.get_bias_voltage(karabo_id_control)
if integration_time < 0:
    integration_time = agipd_cond.get_integration_time()
# Evaluate detector instance for mapping
instrument = karabo_id.split("_")[0]
if instrument == "HED":
    nmods = 8
else:
    nmods = 16
print(f"Using {creation_time} as creation time")
print(f"Operating conditions are:\n• Bias voltage: {bias_voltage}\n• Memory cells: {mem_cells}\n"
      f"• Acquisition rate: {acq_rate}\n• Gain setting: {gain_setting}\n• Integration time: {integration_time}\n"
      f"• Photon Energy: {photon_energy}\n")
```
%% Cell type:code id: tags:
``` python
# Load constants for all modules
keys = ['g0mean', 'g1mean', 'gain', 'chi2_ndof', 'mask']
all_keys = set(keys)
for i in range(n_peaks_fit):
    all_keys.add(f'g{i}mean')
    all_keys.add(f'g{i}sigma')
fit_data = {}
labels = {'g0mean': 'Noise peak position [ADU]',
          'g1mean': 'First photon peak [ADU]',
          'gain': f"Gain [ADU/keV], $\gamma$={photon_energy} [keV]",
          'chi2_ndof': '$\chi^2$/nDOF',
          'mask': 'Fraction of bad pixels over cells'}
modules = []
karabo_da = []
for mod in range(nmods):
    qm = module_index_to_qm(mod)
    fit_data[mod] = {}
    try:
        hf = h5py.File(f'{out_folder}/fits_m{mod:02d}.h5', 'r')
        shape = hf['data/g0mean'].shape
        for key in keys:
            fit_data[mod][key] = hf[f'data/{key}'][()]
        print(f"{in_folder}/{hist_file_template.format(mod)}")
        modules.append(mod)
        karabo_da.append(f"AGIPD{mod:02d}")
    except Exception as e:
        err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
        print(f"No fit data available for module {qm}")
```
%% Cell type:code id: tags:
``` python
# Calculate SlopesFF and BadPixels to be sent to the DB
bpmask = {}
slopesFF = {}
for mod in modules:
    bpmask[mod] = np.zeros(fit_data[mod]['mask'].shape).astype(np.int32)
    bpmask[mod][any_in(fit_data[mod]['mask'], BadPixelsFF.NO_ENTRY.value)] = BadPixels.FF_NO_ENTRIES.value
    bpmask[mod][any_in(fit_data[mod]['mask'],
                       BadPixelsFF.GAIN_DEVIATION.value)] |= BadPixels.FF_GAIN_DEVIATION.value
    bpmask[mod][any_in(fit_data[mod]['mask'],
                       BadPixelsFF.FIT_FAILED.value | BadPixelsFF.ACCURATE_COVAR.value |
                       BadPixelsFF.CHI2_THRESHOLD.value | BadPixelsFF.GAIN_THRESHOLD.value |
                       BadPixelsFF.NOISE_PEAK_THRESHOLD.value | BadPixelsFF.PEAK_WIDTH_THRESHOLD.value)] |= BadPixels.FF_GAIN_EVAL_ERROR.value
    # Set value for bad pixels to the average across pixels for a given module
    slopesFF[mod] = np.copy(fit_data[mod]['gain'])
    slopesFF[mod][fit_data[mod]['mask']>0] = np.nan
    gain_mean = np.nanmean(slopesFF[mod], axis=(1,2))
    for i in range(slopesFF[mod].shape[0]):
        slopesFF[mod][i][fit_data[mod]['mask'][i] > 0] = gain_mean[i]
```
%% Cell type:code id: tags:
``` python
# Read report path and create file location tuple to add with the injection
proposal = list(filter(None, raw_folder.strip('/').split('/')))[-2]
file_loc = f'Proposal: {proposal}, Run: {run}'
report = get_report(metadata_folder)
```
%% Cell type:code id: tags:
``` python
# set the operating condition
condition = Conditions.Illuminated.AGIPD(mem_cells, bias_voltage, 9.2,
                                         pixels_x=512, pixels_y=128, beam_energy=None,
                                         acquisition_rate=acq_rate, gain_setting=gain_setting,
                                         integration_time=integration_time)
# Modify acceptable deviations for the integration time condition if and only if
# the integration time is not using the standard value (12).
if integration_time != 12:
    for p in condition.parameters:
        if p.name == 'Integration Time':
            p.lower_deviation = 5
            p.upper_deviation = 5
# Retrieve a list of all modules corresponding to processed karabo_das
db_modules = get_pdu_from_db(karabo_id, karabo_da, Constants.AGIPD.SlopesFF(),
                             condition, cal_db_interface,
                             snapshot_at=creation_time)
```
%% Cell type:code id: tags:
``` python
# Send constants to DB
def send_const(mod, pdu):
    try:
        # gain
        constant = Constants.AGIPD.SlopesFF()
        constant.data = np.moveaxis(np.moveaxis(slopesFF[mod], 0, 2), 0, 1)
        send_to_db(
            pdu, karabo_id, constant, condition, file_loc,
            report, cal_db_interface, creation_time,
            timeout=cal_db_timeout,
        )
        # bad pixels
        constant_bp = Constants.AGIPD.BadPixelsFF()
        constant_bp.data = np.moveaxis(np.moveaxis(bpmask[mod], 0, 2), 0, 1)
        send_to_db(
            pdu, karabo_id, constant_bp, condition, file_loc,
            report, cal_db_interface, creation_time,
            timeout=cal_db_timeout,
        )
    except Exception as e:
        err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
        when = None

# Check if we have the shape we expect
if db_output:
    if slopesFF[modules[0]].shape == (mem_cells, 512, 128):
        with Pool(processes=len(modules)) as pool:
            const_out = pool.starmap(send_const, zip(modules, db_modules))
    else:
        print("Constants are not sent to the DB because of a shape mismatch")
        print(f"Expected {(mem_cells, 512, 128)}, observed {slopesFF[modules[0]].shape}")

condition_dict = {}
for entry in condition.to_dict()['parameters']:
    key = entry.pop('parameter_name')
    del entry['description']
    del entry['flg_available']
    condition_dict[key] = entry

# Create the same file structure as database constants files, in which
# each constant type has its corresponding condition and data.
if local_output:
    for mod, pdu in zip(modules, db_modules):
        qm = module_index_to_qm(mod)
        file = f"{out_folder}/slopesff_bpmask_module_{qm}.h5"
        dic = {
            pdu: {
                'SlopesFF': {
                    0: {
                        'condition': condition_dict,
                        'data': np.moveaxis(np.moveaxis(slopesFF[mod], 0, 2), 0, 1)}
                },
                'BadPixelsFF': {
                    0: {
                        'condition': condition_dict,
                        'data': np.moveaxis(np.moveaxis(bpmask[mod], 0, 2), 0, 1)}
                },
            }
        }
        save_dict_to_hdf5(dic, file)
```
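%% Cell type:markdown id: tags:
To sanity-check the file written by `save_dict_to_hdf5`, the nested group/dataset structure can be inspected with plain h5py. A sketch, assuming the `file` path from the cell above exists:
%% Cell type:code id: tags:
``` python
import h5py

# Walk the saved file and print every object path, plus shapes for datasets
# (groups have no shape attribute, so they print an empty string).
with h5py.File(file, 'r') as f:
    f.visititems(lambda name, obj: print(name, getattr(obj, 'shape', '')))
```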
%% Cell type:code id: tags:
``` python
# Define AGIPD geometry
# TODO: find a better way to do this?
if instrument == "HED":
    geom = AGIPD_500K2GGeometry.from_origin()
else:
    geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
        (-525, 625),
        (-550, -10),
        (520, -160),
        (542.5, 475),
    ])
```
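%% Cell type:markdown id: tags:
The quadrant positions above can be verified visually before plotting data onto the geometry; EXtra-geom's `inspect()` draws the module layout with labels:
%% Cell type:code id: tags:
``` python
# Visual sanity check of the assembled geometry: module outlines and labels
geom.inspect()
```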
%% Cell type:code id: tags:
``` python
# Create the arrays that will be used for figures.
# A dictionary contains all the data for each of the processing stages (gains, mean, slopesFF...).
# Each array corresponds to the data for all processed modules.
# These are updated with their fit/slopes data in the following loops.
if cell_range == [0, 0]:
    cell_range[1] = shape[0]

const_data = {}
for key in keys:
    const_data[key] = np.full((nmods, shape[0], 512, 128), np.nan)
    for i in range(nmods):
        if key in fit_data[i]:
            const_data[key][i, :, pixel_range[0]:pixel_range[2],
                            pixel_range[1]:pixel_range[3]] = fit_data[i][key]

const_data['slopesFF'] = np.full((nmods, shape[0], 512, 128), np.nan)
labels['slopesFF'] = fr'slopesFF [ADU/keV], $\gamma$={photon_energy} [keV]'
for i in range(nmods):
    if i in slopesFF:
        const_data['slopesFF'][i, :, pixel_range[0]:pixel_range[2],
                               pixel_range[1]:pixel_range[3]] = slopesFF[i]
```
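%% Cell type:markdown id: tags:
The `pixel_range` slicing embeds a possibly smaller fitted region into full-module (512, 128) arrays padded with NaN. A toy version of the same embedding, assuming the (x0, y0, x1, y1) ordering that the slicing above implies:
%% Cell type:code id: tags:
``` python
import numpy as np

full = np.full((512, 128), np.nan)  # full module, NaN outside the fitted region
fitted = np.ones((256, 64))         # toy fit result for a sub-region
pixel_range = (0, 0, 256, 64)       # hypothetical (x0, y0, x1, y1)

full[pixel_range[0]:pixel_range[2], pixel_range[1]:pixel_range[3]] = fitted
assert np.isnan(full).sum() == full.size - fitted.size
```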
%% Cell type:markdown id: tags:
## Summary across pixels ##
%% Cell type:code id: tags:
``` python
for key in const_data.keys():
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    if key == 'mask':
        data = np.nanmean(const_data[key] > 0, axis=1)
        vmin, vmax = (0, 1)
    else:
        data = np.nanmean(const_data[key], axis=1)
        vmin, vmax = get_range(data, 5)
    ax = geom.plot_data_fast(data, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
    _ = ax.set_title(labels[key])
```
%% Cell type:markdown id: tags:
## Summary histograms ##
%% Cell type:code id: tags:
``` python
sel = (const_data['mask'] == 0)

module_mean = np.nanmean(const_data["gain"], axis=(1, 2, 3))
module_mean = module_mean[:, np.newaxis, np.newaxis, np.newaxis]

dsets = {'d01 [ADU]': const_data["g1mean"] - const_data["g0mean"],
         'gain [ADU/keV]': const_data["gain"],
         'gain relative to module mean': const_data["gain"] / module_mean,
         }

fig = plt.figure(figsize=(16, 5))
for i, (par, data) in enumerate(dsets.items()):
    ax = fig.add_subplot(1, 3, i + 1)
    plt_range = np.nanmin(data), np.nanmax(data)
    if 'd01' in par:
        ax.axvline(d0_lim[0])
        ax.axvline(d0_lim[1])
    elif 'rel' in par:
        ax.axvline(gain_lim[0])
        ax.axvline(gain_lim[1])
    num_bins = 100
    _ = ax.hist(data.flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='red',
                label='all fits')
    a = ax.hist(data[sel].flatten(),
                bins=num_bins, range=plt_range,
                log=True, color='g',
                label='good fits only',
                )
    ax.set_xlabel(f"{par}")
    ax.legend()
```
%% Cell type:markdown id: tags:
## Summary across cells ##
Good pixels only.
%% Cell type:code id: tags:
``` python
for key in const_data.keys():
    data = np.copy(const_data[key])
    if key == 'mask':
        data = data > 0
    else:
        data[const_data['mask'] > 0] = np.nan
    d = []
    for i in range(nmods):
        d.append({'x': np.arange(data[i].shape[0]),
                  'y': np.nanmean(data[i], axis=(1, 2)),
                  'drawstyle': 'steps-pre',
                  'label': f'{i}',
                  'linewidth': 2,
                  'linestyle': '--' if i > 7 else '-'
                  })
    fig = plt.figure(figsize=(15, 6))
    ax = fig.add_subplot(111)
    _ = simplePlot(d, xrange=(-12, 510),
                   x_label='Memory Cell ID',
                   y_label=labels[key],
                   use_axis=ax,
                   legend='top-left-frame-ncol8')
    ylim = ax.get_ylim()
    ax.set_ylim(ylim[0], ylim[1] + np.abs(ylim[1] - ylim[0]) * 0.2)
    ax.grid()
```
%% Cell type:markdown id: tags:
## Summary table ##
%% Cell type:code id: tags:
``` python
table = []
for i in modules:
    table.append((i,
                  f"{np.nanmean(slopesFF[i]):0.1f} +- {np.nanstd(slopesFF[i]):0.2f}",
                  f"{np.nanmean(bpmask[i] > 0) * 100:0.1f} ({np.nansum(bpmask[i] > 0)})"
                  ))

all_SFF = np.array([list(sff) for sff in slopesFF.values()])
all_MSK = np.array([list(msk) for msk in bpmask.values()])

table.append(('overall',
              f"{np.nanmean(all_SFF):0.1f} +- {np.nanstd(all_SFF):0.2f}",
              f"{np.nanmean(all_MSK > 0) * 100:0.1f} ({np.nansum(all_MSK > 0)})"
              ))

md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
                                     headers=["Module", "Gain [ADU/keV]", "Bad pixels [%(Count)]"])))
```
%% Cell type:markdown id: tags:
## Performance plots
%% Cell type:code id: tags:
``` python
def get_trains_data(run_folder, source, include, tid=None):
    """Load a single train for all modules.

    :param run_folder: Path to the folder with data
    :param source: Data source to be loaded
    :param include: Inset of the file name to be considered
    :param tid: Train ID to be loaded. The first train is used if None is given
    """
    run_data = RunDirectory(run_folder, include)
    if tid:
        tid, data = run_data.select('*/DET/*', source).train_from_id(tid)
        return tid, stack_detector_data(data, source, modules=nmods)
    else:
        for tid, data in run_data.select('*/DET/*', source).trains(require_all=True):
            return tid, stack_detector_data(data, source, modules=nmods)
    return None, None

include = '*S00000*'
tid, orig = get_trains_data(f'{proc_folder}/r{run:04d}/', 'image.data', include)
orig = orig[cell_range[0]:cell_range[1], ...]
```
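%% Cell type:markdown id: tags:
`require_all=True` makes `.trains()` yield only trains for which every selected module recorded data, so the first yielded train is guaranteed complete. To load a specific train instead, its ID can be looked up with EXtra-data and passed explicitly (a usage sketch; the chosen index is a placeholder):
%% Cell type:code id: tags:
``` python
# List the available train IDs in the run, then request one explicitly
run_data = RunDirectory(f'{proc_folder}/r{run:04d}/', include)
print(run_data.train_ids[:5])  # first few train IDs

chosen_tid = run_data.train_ids[0]  # placeholder choice
tid, stacked = get_trains_data(f'{proc_folder}/r{run:04d}/', 'image.data',
                               include, tid=chosen_tid)
```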
%% Cell type:code id: tags:
``` python
# FIXME: mask bad pixels from the median
# mask = const_data['BadPixelsFF']
corrections = const_data['slopesFF']  # (16, shape[0], 512, 128), shape[0] = cell_range[1] - cell_range[0]
corrections = np.moveaxis(corrections, 1, 0)  # (shape[0], 16, 512, 128)
rel_corr = corrections / np.nanmedian(corrections)
corrected = orig / rel_corr
```
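%% Cell type:markdown id: tags:
Dividing by the median-normalised slopes flattens the gain structure while leaving the overall ADU scale of the data essentially unchanged. A toy numpy example of the same normalisation (standalone, not the notebook's arrays):
%% Cell type:code id: tags:
``` python
import numpy as np

signal = np.full((4, 4), 100.0)            # flat illumination, 100 ADU everywhere
gain = np.array([[1.0, 1.2],
                 [0.8, 1.0]]).repeat(2, 0).repeat(2, 1)
measured = signal * gain                   # gain imprints structure on the image

rel = gain / np.nanmedian(gain)            # median-normalised correction
flattened = measured / rel

print(np.nanstd(measured), np.nanstd(flattened))        # structure removed
print(np.nanmedian(measured), np.nanmedian(flattened))  # scale preserved
```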
%% Cell type:markdown id: tags:
### Mean value not corrected (train 0)
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
odata = np.nanmean(orig, axis=0)
vmin, vmax = get_range(odata, 5)
ax = geom.plot_data_fast(odata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
_ = ax.set_title("Original data, mean across one train")
```
%% Cell type:markdown id: tags:
### Mean value corrected (train 0)
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
cdata = np.nanmean(corrected, axis=0)
ax = geom.plot_data_fast(cdata, ax=ax, cmap="jet", vmin=vmin, vmax=vmax, figsize=(20, 20))
_ = ax.set_title("Corrected data, mean across one train")
```
%% Cell type:markdown id: tags:
### Laplace transform of mean image
%% Cell type:code id: tags:
``` python
from scipy.ndimage import laplace

cmax = np.max(cdata)
omax = np.max(odata)
clap = np.zeros_like(cdata)
olap = np.zeros_like(odata)
for i in range(nmods):
    clap[i] = np.abs(laplace(cdata[i].astype(float) / cmax))
    olap[i] = np.abs(laplace(odata[i].astype(float) / omax))

fig = plt.figure(figsize=(20, 10))
vmin, vmax = get_range(olap, 2)
ax = fig.add_subplot(121)
ax = geom.plot_data_fast(olap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
_ = ax.set_title("Laplace (original data)")
ax = fig.add_subplot(122)
ax = geom.plot_data_fast(clap, ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
_ = ax.set_title("Laplace (gain corrected data)")
```
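%% Cell type:markdown id: tags:
The Laplace operator responds to local curvature, so residual pixel-to-pixel gain structure inflates the typical |Laplacian| of the normalised image; after a good correction the value should drop. A toy illustration of that behaviour:
%% Cell type:code id: tags:
``` python
import numpy as np
from scipy.ndimage import laplace

rng = np.random.default_rng(0)
smooth = np.outer(np.linspace(1, 2, 64), np.linspace(1, 2, 64))  # slowly varying image
noisy = smooth * rng.normal(1.0, 0.05, smooth.shape)             # 5% gain noise added

for name, img in [("smooth", smooth), ("noisy", noisy)]:
    print(name, np.median(np.abs(laplace(img / img.max()))))
```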
%% Cell type:markdown id: tags:
### Histogram of corrected and uncorrected spectrum (train 0)
%% Cell type:code id: tags:
``` python
######################################
# FIT PEAKS
######################################
x_range = [peak_range[0][0], peak_range[-1][-1]]
nb = x_range[1] - x_range[0] + 1
sel = ~np.isnan(corrected)

fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
y, xe, _ = ax.hist(corrected[sel].flatten(), bins=nb, range=x_range, label='corrected', alpha=0.5)
# get the bin centers from the bin edges
xc = xe[:-1] + (xe[1] - xe[0]) / 2

pars, _ = get_starting_parameters(xc, y, peak_range, 4)
minuit = fit_n_peaks(xc, y, pars, x_range, fix_d01=False, sigma_limit=1)
pc = minuit.args
resc = minuit.fitarg
yfc = gaussian_sum(xc, 4, *pc)
plt.plot(xc, yfc, label='corrected fit')

y, _, _ = ax.hist(orig[sel].flatten(), bins=nb, range=x_range, label='original', alpha=0.5)
pars, _ = get_starting_parameters(xc, y, peak_range, 4)
minuit = fit_n_peaks(xc, y, pars, x_range, fix_d01=False, sigma_limit=1)
po = minuit.args
reso = minuit.fitarg
yfo = gaussian_sum(xc, 4, *po)
plt.plot(xc, yfo, label='original fit')

plt.title("Signal spectrum, first train")
plt.xlabel('[ADU]')
plt.legend()
plt.show()
```
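%% Cell type:markdown id: tags:
For reference, a sum-of-Gaussians model of the kind fitted above can be written as below. This is a sketch only: the notebook's actual `gaussian_sum` comes from the calibration tools and its parameter packing may differ; (amplitude, centre, sigma) per peak is assumed here.
%% Cell type:code id: tags:
``` python
import numpy as np

def gaussian_sum_sketch(x, n_peaks, *params):
    """Sum of n_peaks Gaussians; params packed as (A, mu, sigma) per peak."""
    y = np.zeros_like(x, dtype=float)
    for k in range(n_peaks):
        a, mu, sigma = params[3 * k:3 * k + 3]
        y += a * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    return y

x = np.linspace(-50, 250, 301)
y = gaussian_sum_sketch(x, 2, 100, 0, 10, 40, 60, 12)  # noise peak + 1-photon peak
```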
%% Cell type:markdown id: tags:
### Summary table
%% Cell type:code id: tags:
``` python
from scipy.stats import median_absolute_deviation as mad

table = []
headers = ["Parameter",
           "Value (original data)",
           "Value (gain corrected data)",
           "Relative difference"]

for i in range(4):
    table.append((f"Sigma{i} (ADU)",
                  f"{reso[f'g{i}sigma']:0.2f} ",
                  f"{resc[f'g{i}sigma']:0.2f} ",
                  f"{(reso[f'g{i}sigma'] - resc[f'g{i}sigma']) / reso[f'g{i}sigma']:0.2f} ",
                  ))

ovar = np.std(odata)
cvar = np.std(cdata)
table.append(("RMS of mean image",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

omin, omax = get_range(odata, 5)
cmin, cmax = get_range(cdata, 5)
ovar = np.std(odata[(odata > omin) & (odata < omax)])
cvar = np.std(cdata[(cdata > cmin) & (cdata < cmax)])
table.append(("RMS of mean image (mu+-5sigma)",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

ovar = mad(odata.flatten())
cvar = mad(cdata.flatten())
table.append(("MAD of mean image",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

ovar = np.median(olap)
cvar = np.median(clap)
table.append(("Median Laplace",
              f"{ovar:0.3f} ",
              f"{cvar:0.3f} ",
              f"{(ovar - cvar) / ovar:0.3f} ",
              ))

md = display(Latex(tabulate.tabulate(table,
                                     tablefmt='latex',
                                     headers=headers)))
```
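%% Cell type:markdown id: tags:
Note that `median_absolute_deviation` has been removed from recent SciPy releases. If the import above fails, `median_abs_deviation` with `scale='normal'` reproduces the old default scaling (a compatibility sketch):
%% Cell type:code id: tags:
``` python
# Fallback for newer SciPy, matching the old default scale of ~1.4826
try:
    from scipy.stats import median_absolute_deviation as mad
except ImportError:
    from functools import partial
    from scipy.stats import median_abs_deviation
    mad = partial(median_abs_deviation, scale='normal')
```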