Commit 796eb02c authored by Nuno Duarte

solving merge conflicts

parent 152f06bd
Merge request !870: [EPIX100] Feat: Compliance with update to receiver device
%% Cell type:markdown id: tags:
# ePix100 Dark Characterization
Author: European XFEL Detector Group, Version: 2.0
The following notebook provides dark image analysis and calibration constants of the ePix100 detector.
Dark characterization evaluates offset and noise of the detector and gives information about bad pixels.
Noise and bad pixels maps are calculated independently for each of the 4 ASICs of ePix100, since their noise behaviour can be significantly different.
Common mode correction can be applied to increase sensitivity to noise-related bad pixels. Common mode correction is achieved by subtracting the median of all pixels that are read out at the same time along a row/column. This is done in an iterative process, by which a new bad pixels map is calculated and used to mask data as the common mode values across the rows/columns are updated.
Resulting maps are saved as .h5 files for later use and injected into the calibration DB.
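The gist of one common-mode iteration can be sketched in a few lines of numpy: subtract row/column medians from offset-subtracted frames while ignoring pixels already masked as bad, then recompute the noise map. The sketch below is an illustration only, using dummy data and simple full-row/column medians; the notebook itself performs the correction bank-wise with `XFELDetAna.xfelpycaltools.CommonModeCorrection` further down.
%% Cell type:code id: tags:
``` python
import numpy as np

# Illustration only: one common-mode iteration on dummy offset-subtracted frames
# (dummy shape, much smaller than the real 708 x 768 sensor).
rng = np.random.default_rng(0)
frames = rng.normal(0, 5, size=(50, 64, 64)) # (trains, rows, columns)
frames[:, 10, 20] = np.nan # pixels masked as bad are set to NaN

# Subtract the median of each row and of each column, ignoring masked pixels.
frames -= np.nanmedian(frames, axis=2, keepdims=True) # along rows
frames -= np.nanmedian(frames, axis=1, keepdims=True) # along columns

# Recompute the per-pixel noise map; it feeds the next bad-pixel evaluation.
noise_map = np.nanstd(frames, axis=0)
```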
%% Cell type:code id: tags:
``` python
in_folder = '/gpfs/exfel/exp/MID/202330/p900329/raw' # input folder, required
out_folder = '' # output folder, required
metadata_folder = '' # Directory containing calibration_metadata.yml when run by xfel-calibrate
sequence = 0 # sequence file to use
run = 106 # which run to read data from, required
# Parameters for accessing the raw data.
karabo_id = "MID_EXP_EPIX-1" # karabo karabo_id
karabo_da = ["EPIX01"] # data aggregators
receiver_template = "RECEIVER" # detector receiver template for accessing raw data files
path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data
instrument_source_template = '{}/DET/{}:daqOutput' # instrument detector data source in h5files
# Parameters for the calibration database.
use_dir_creation_date = True
cal_db_interface = "tcp://max-exfl-cal001:8020" # calibration DB interface to use
cal_db_timeout = 300000 # timeout on caldb requests
db_output = False # Output constants to the calibration database
local_output = True # Output constants locally
# Conditions used for injected calibration constants.
bias_voltage = 200 # Bias voltage
in_vacuum = False # Detector operated in vacuum
fix_integration_time = -1 # Integration time. Set to -1 to read from .h5 file
fix_temperature = -1 # Fixed temperature in Kelvin. Set to -1 to read from .h5 file
temp_limits = 5 # Limit for parameter Operational temperature
badpixel_noise_sigma = 5 # Bad pixels defined by noise value outside n * std from median. Default: 5
badpixel_offset_sigma = 2 # Bad pixels defined by offset value outside n * std from median. Default: 2
CM_N_iterations = 2 # Number of iterations for common mode correction. Set to 0 to skip it
# Parameters used during selecting raw data trains.
min_trains = 1 # Minimum number of trains that should be available to process dark constants. Default 1.
max_trains = 1000 # Maximum number of trains to use for processing dark constants. Set to 0 to use all available trains.
# Don't delete! myMDC sends this by default.
operation_mode = '' # Detector operation mode, optional
# TODO: delete after removing from calibration_configurations
db_module = '' # ID of module in calibration database, this parameter is ignored in the notebook. TODO: remove from calibration_configurations.
```
%% Cell type:code id: tags:
``` python
import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import display, Markdown
from prettytable import PrettyTable
from extra_data import RunDirectory
from pathlib import Path
import XFELDetAna.xfelprofiler as xprof
from XFELDetAna import xfelpyanatools as xana
from XFELDetAna import xfelpycaltools as xcal
from XFELDetAna.plotting.util import prettyPlotting
from cal_tools.enums import BadPixels
from cal_tools.step_timing import StepTimer
from cal_tools.epix100 import epix100lib
from cal_tools.tools import (
    get_dir_creation_date,
    get_pdu_from_db,
    get_report,
    save_const_to_h5,
    send_to_db,
)
from iCalibrationDB import Conditions, Constants
```
%% Cell type:code id: tags:
``` python
%matplotlib inline
warnings.filterwarnings('ignore')
prettyPlotting = True
profiler = xprof.Profiler()
profiler.disable()
instrument_src = instrument_source_template.format(karabo_id, receiver_template)
```
%% Cell type:code id: tags:
``` python
# Read report path and create file location tuple to add with the injection
proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]
file_loc = f'proposal:{proposal} runs:{run}'
report = get_report(metadata_folder)
ped_dir = os.path.join(in_folder, f"r{run:04d}")
fp_name = path_template.format(run, karabo_da[0]).format(sequence)
run_dir = RunDirectory(ped_dir)
print(f"Reading from: {Path(ped_dir) / fp_name}")
print(f"Run number: {run}")
print(f"Instrument H5File source: {instrument_src}")
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run)
    print(f"Using {creation_time.isoformat()} as creation time")
os.makedirs(out_folder, exist_ok=True)
```
%% Cell type:code id: tags:
``` python
# Read sensor size
sensor_size = run_dir[instrument_src, 'data.image.dims'].as_single_value(reduce_by='first') # (x=768, y=708) expected
sensor_size = sensor_size[sensor_size != 1] # data.image.dims for old data is [768, 708, 1]
assert np.array_equal(sensor_size, [768, 708]), 'Unexpected sensor dimensions.'
# Path to pixels ADC values
pixels_src = (instrument_src, "data.image.pixels")
# Total number of images to process
n_trains = run_dir.get_data_counts(*pixels_src).shape[0]
# Modify n_trains to process based on given maximum
# and minimum number of trains.
if max_trains:
    n_trains = min(max_trains, n_trains)
if n_trains < min_trains:
    raise ValueError(
        f"Less than {min_trains} trains are available in RAW data."
        " Not enough data to process darks.")
print(f"Number of dark images to analyze: {n_trains}")
```
%% Cell type:code id: tags:
``` python
ctrl_data = epix100lib.epix100Ctrl(
    run_dc=run_dir,
    instrument_src=instrument_src,
    ctrl_src=f"{karabo_id}/DET/CONTROL",
)
# Read integration time
if fix_integration_time == -1:
    integration_time = ctrl_data.get_integration_time()
    integration_time_str_add = ''
else:
    integration_time = fix_integration_time
    integration_time_str_add = '(manual input)'
# Read temperature
if fix_temperature == -1:
    temperature = ctrl_data.get_temprature()
    temperature_k = temperature + 273.15
    temp_str_add = ''
else:
    temperature_k = fix_temperature
    temperature = fix_temperature - 273.15
    temp_str_add = '(manual input)'
# Print operating conditions
print(f"Bias voltage: {bias_voltage} V")
print(f"Detector integration time: {integration_time} \u03BCs {integration_time_str_add}")
print(f"Mean temperature: {temperature:0.2f}°C / {temperature_k:0.2f} K {temp_str_add}")
print(f"Operated in vacuum: {in_vacuum}")
```
%% Cell type:code id: tags:
``` python
# Passing repetitive code along the notebook to a function
def stats_calc(data):
    '''
    Calculates basic statistical parameters of the input data:
    mean, standard deviation, median, minimum and maximum.
    Returns dictionary with keys: 'mean', 'std', 'median',
    'min', 'max' and 'legend'.
    Parameters
    ----------
    data : ndarray
        Data to analyse.
    '''
    stats = {}
    stats['mean'] = np.nanmean(data)
    stats['std'] = np.nanstd(data)
    stats['median'] = np.nanmedian(data)
    stats['min'] = np.nanmin(data)
    stats['max'] = np.nanmax(data)
    stats['legend'] = ('mean: ' + str(np.round(stats['mean'],2)) +
                       '\nstd: ' + str(np.round(stats['std'],2)) +
                       '\nmedian: ' + str(np.round(stats['median'],2)) +
                       '\nmin: ' + str(np.round(stats['min'],2)) +
                       '\nmax: ' + str(np.round(stats['max'],2)))
    return stats
```
%% Cell type:code id: tags:
``` python
step_timer = StepTimer()
step_timer.start()
# Read data
data_dc = run_dir.select(
    *pixels_src, require_all=True).select_trains(np.s_[:n_trains])
data = data_dc[pixels_src].ndarray()
# Instantiate constant maps to be filled with offset, noise and bad pixels maps
constant_maps = {}
step_timer.done_step('Darks loaded. Elapsed Time')
```
%% Cell type:markdown id: tags:
## Offset Map
%% Cell type:code id: tags:
``` python
step_timer.start()
# Calculate offset per pixel and store it in constant_maps
constant_maps['Offset'] = np.nanmean(data, axis=0)[..., np.newaxis]
# Calculate basic statistical parameters
stats = stats_calc(constant_maps['Offset'].flatten())
# Offset map
fig = xana.heatmapPlot(
    constant_maps['Offset'].squeeze(),
    lut_label='[ADU]',
    x_label='Column',
    y_label='Row',
    vmin=max(0, np.round((stats['median']-stats['std'])/250)*250), # Force cb label to be multiple of 250 for reproducibility
    vmax=min(np.power(2,14)-1, np.round((stats['median']+stats['std'])/250)*250)
)
fig.suptitle('Offset Map', x=.48, y=.9, fontsize=16)
fig.set_size_inches(h=15, w=15)
# Offset Histogram
h, c = np.histogram(
    constant_maps['Offset'].flatten(),
    bins=np.arange(stats['min'], stats['max'], stats['std']/100)
)
d = {'x': c[:-1],
     'y': h,
     'color': 'blue'
}
fig = xana.simplePlot(
    d,
    aspect=1.5,
    x_label='Offset [ADU]',
    y_label='Counts',
    x_range=(0, np.power(2,14)-1),
    y_range=(0, max(h)*1.1),
    y_log=True
)
fig.text(
    s=stats['legend'],
    x=.7,
    y=.7,
    fontsize=14,
    bbox=dict(facecolor='yellow', edgecolor='black', alpha=.1)
)
fig.suptitle('Offset Map Histogram', x=.5, y=.92, fontsize=16)
step_timer.done_step('Offset Map created. Elapsed Time')
```
%% Cell type:markdown id: tags:
## Noise Map
%% Cell type:code id: tags:
``` python
step_timer.start()
# Calculate noise per pixel and store it in constant_maps
constant_maps['Noise'] = np.nanstd(data, axis=0)[..., np.newaxis]
# Calculate basic statistical parameters
stats = stats_calc(constant_maps['Noise'].flatten())
# Noise heat map
fig = xana.heatmapPlot(
    constant_maps['Noise'].squeeze(),
    lut_label='[ADU]',
    x_label='Column',
    y_label='Row',
    vmin=max(0, np.round((stats['median'] - badpixel_noise_sigma*stats['std']))),
    vmax=np.round(stats['median'] + badpixel_noise_sigma*stats['std'])
)
fig.suptitle('Noise Map', x=.5, y=.9, fontsize=16)
fig.set_size_inches(h=15, w=15)
# Calculate overall noise histogram
bins = np.arange(max(0, stats['mean'] - badpixel_noise_sigma*stats['std']),
                 stats['mean'] + badpixel_noise_sigma*stats['std'],
                 stats['std']/100)
h, c = np.histogram(
    constant_maps['Noise'].flatten(),
    bins=bins
)
d = [{'x': c[:-1],
      'y': h,
      'color': 'black',
      'label': 'Total'
}]
# Calculate per ASIC histogram
asic = []
asic.append({'noise': constant_maps['Noise'][:int(sensor_size[1]//2), int(sensor_size[0]//2):],
             'label': 'ASIC 0 (bottom right)',
             'color': 'blue'})
asic.append({'noise': constant_maps['Noise'][int(sensor_size[1]//2):, int(sensor_size[0]//2):],
             'label': 'ASIC 1 (top right)',
             'color': 'red'})
asic.append({'noise': constant_maps['Noise'][int(sensor_size[1]//2):, :int(sensor_size[0]//2)],
             'label': 'ASIC 2 (top left)',
             'color': 'green'})
asic.append({'noise': constant_maps['Noise'][:int(sensor_size[1]//2), :int(sensor_size[0]//2)],
             'label': 'ASIC 3 (bottom left)',
             'color': 'magenta'})
min_std = np.inf
for a in asic:
    h, c = np.histogram(a['noise'].flatten(), bins=bins)
    d.append({'x': c[:-1], 'y': h, 'color': a['color'], 'label': a['label']})
    min_std = np.nanmin((min_std, np.nanstd(a['noise'])))
    print(a['label'][:6] +
          ': median = ' + "{:.2f}".format(np.round(np.nanmedian(a['noise']),2)) +
          ' | std = ' + "{:.2f}".format(np.round(np.nanstd(a['noise']),2)) +
          a['label'][6:])
arg_max_median = 0
for i in range(0, np.size(d)):
    arg_max_median = np.max((arg_max_median, np.argmax(d[i]['y'])))
# Plot noise histogram
fig = xana.simplePlot(
    d,
    aspect=1.5,
    x_label='Noise [ADU]',
    y_label='Counts',
    x_range=(max(0, stats['median'] - badpixel_noise_sigma*stats['std']),
             stats['median'] + badpixel_noise_sigma*stats['std']),
    y_range=(0, max(d[0]['y'])*1.1),
)
plt.grid(linestyle=':')
leg = fig.legend(bbox_to_anchor=(.42, .88), fontsize=14)
fig.text(
    s=stats['legend'],
    x=.75,
    y=.7,
    fontsize=14,
    bbox=dict(facecolor='yellow', edgecolor='black', alpha=.1)
)
fig.suptitle('Noise Map Histogram', x=.5, y=.92, fontsize=16)
step_timer.done_step('\nNoise Map created. Elapsed Time')
```
%% Cell type:markdown id: tags:
## Bad Pixels
The bad pixel map is deduced by comparing offset and noise of each pixel ($v_i$) against the median value of the respective maps for each ASIC ($v_k$):
$$
v_i > \mathrm{median}(v_{k}) + n \sigma_{v_{k}}
$$
or
$$
v_i < \mathrm{median}(v_{k}) - n \sigma_{v_{k}}
$$
Values are encoded in a 32 bit mask, where for the dark image deduced bad pixels the following non-zero entries are relevant:
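Because each flag occupies its own bit, several defects could in principle be combined in a single mask value with a bitwise OR and queried with a bitwise AND. The short sketch below only illustrates that encoding with the same `BadPixels` enum used in this notebook; the cells further down simply assign one flag value at a time.
%% Cell type:code id: tags:
``` python
# Illustration of the 32-bit encoding: the flag values are powers of two.
from cal_tools.enums import BadPixels

combined = BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value
print(f'{combined:032b}') # both bits set
print(bool(combined & BadPixels.NOISE_OUT_OF_THRESHOLD.value)) # True: noise bit present
```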
%% Cell type:code id: tags:
``` python
def print_bp_entry(bp):
    '''
    Prints bad pixels bit encoding.
    Parameters
    ----------
    bp : enum 'BadPixels'
    '''
    print('{:<23s}: {:032b} ({})'.format(bp.name, bp.value, bp.real))
print_bp_entry(BadPixels.OFFSET_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.NOISE_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.OFFSET_NOISE_EVAL_ERROR)
```
%% Cell type:code id: tags:
``` python
def eval_bpidx(const_map, n_sigma, block_size):
    '''
    Evaluates bad pixels by comparing the average offset and
    noise of each pixel against the median value of the
    respective maps of each ASIC.
    Returns boolean array.
    Parameters
    ----------
    const_map : ndarray
        Offset or noise constant map to input.
    n_sigma : float
        Standard deviation multiplicity interval outside
        which bad pixels are defined.
    block_size : ndarray
        dimensions ([x,y]) of each ASIC.
    '''
    blocks = {} # Each block corresponds to 1 ASIC
    blocks['0'] = const_map[:int(block_size[1]), int(block_size[0]):] # bottom right
    blocks['1'] = const_map[int(block_size[1]):, int(block_size[0]):] # top right
    blocks['2'] = const_map[int(block_size[1]):, :int(block_size[0])] # top left
    blocks['3'] = const_map[:int(block_size[1]), :int(block_size[0])] # bottom left
    idx = {}
    for b in range(0, len(blocks)):
        mdn = np.nanmedian(blocks[str(b)])
        std = np.nanstd(blocks[str(b)])
        idx[str(b)] = ((blocks[str(b)] > mdn + n_sigma*std) | (blocks[str(b)] < mdn - n_sigma*std))
    idx_output = np.zeros(const_map.shape, dtype=bool)
    idx_output[:int(block_size[1]), int(block_size[0]):] = idx['0']
    idx_output[int(block_size[1]):, int(block_size[0]):] = idx['1']
    idx_output[int(block_size[1]):, :int(block_size[0])] = idx['2']
    idx_output[:int(block_size[1]), :int(block_size[0])] = idx['3']
    return idx_output
```
%% Cell type:code id: tags:
``` python
def bp_analysis_table(bad_pixels_map, title=''):
    '''
    Prints a table with short analysis of the number and
    percentage of bad pixels on count of offset or noise
    out of threshold, or evaluation error.
    Returns PrettyTable.
    Parameters
    ----------
    bad_pixels_map : ndarray
        Bad pixel map to analyse.
    title : string, optional
        Table title to be used.
    '''
    num_bp = np.sum(bad_pixels_map!=0)
    offset_bp = np.sum(bad_pixels_map==1)
    noise_bp = np.sum(bad_pixels_map==2)
    eval_error_bp = np.sum(bad_pixels_map==4)
    t = PrettyTable()
    t.field_names = [title]
    t.add_row(['Total number of Bad Pixels : {:<5} ({:<.2f}%)'.format(num_bp, 100*num_bp/np.prod(bad_pixels_map.shape))])
    t.add_row(['  Offset out of threshold  : {:<5} ({:<.2f}%)'.format(offset_bp, 100*offset_bp/np.prod(bad_pixels_map.shape))])
    t.add_row(['  Noise out of threshold   : {:<5} ({:<.2f}%)'.format(noise_bp, 100*noise_bp/np.prod(bad_pixels_map.shape))])
    t.add_row(['  Evaluation error         : {:<5} ({:<.2f}%)'.format(eval_error_bp, 100*eval_error_bp/np.prod(bad_pixels_map.shape))])
    return t
```
%% Cell type:code id: tags:
``` python
# Add bad pixels from darks to constant_maps
constant_maps['BadPixelsDark'] = np.zeros(constant_maps['Offset'].shape, np.uint32)
# Find noise related bad pixels
constant_maps['BadPixelsDark'][eval_bpidx(constant_maps['Noise'], badpixel_noise_sigma, sensor_size//2)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value
constant_maps['BadPixelsDark'][~np.isfinite(constant_maps['Noise'])] = BadPixels.OFFSET_NOISE_EVAL_ERROR.value
# Find offset related bad pixels
constant_maps['BadPixelsDark'][eval_bpidx(constant_maps['Offset'], badpixel_offset_sigma, sensor_size//2)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value
constant_maps['BadPixelsDark'][~np.isfinite(constant_maps['Offset'])] = BadPixels.OFFSET_NOISE_EVAL_ERROR.value
# Plot Bad Pixels Map
fig = xana.heatmapPlot(
    np.exp(np.nan_to_num(np.log(constant_maps['BadPixelsDark']), neginf=np.nan)).squeeze(), # convert zeros to NaN
    lut_label='Bad pixel value [ADU]', # for plotting purposes
    x_label='Column',
    y_label='Row',
    x_range=(0, sensor_size[0]),
    y_range=(0, sensor_size[1])
)
fig.suptitle('Initial Bad Pixels Map', x=.5, y=.9, fontsize=16)
fig.set_size_inches(h=15, w=15)
step_timer.done_step('Initial Bad Pixels Map created. Elapsed Time')
print(bp_analysis_table(constant_maps['BadPixelsDark'], title='Initial Bad Pixel Analysis'))
```
%% Cell type:markdown id: tags:
## Common Mode Correction
Common mode correction is applied here to increase sensitivity to bad pixels. This is done in an iterative process. Each iteration is composed of the following steps:
1. Common mode noise is calculated and subtracted from data.
2. Noise map is recalculated.
3. Bad pixels are recalculated based on the new noise map.
4. Data is masked based on the new bad pixels map.
%% Cell type:code id: tags:
``` python
if CM_N_iterations < 1:
    print('Common mode correction not applied.')
else:
    commonModeBlockSize = (sensor_size//[8,2]).astype(int) # bank size (x=96, y=354) pixels
    # Instantiate common mode calculators for column and row CM correction
    cmCorrection_col = xcal.CommonModeCorrection(
        sensor_size.tolist(),
        commonModeBlockSize.tolist(),
        'col',
        parallel=False)
    cmCorrection_row = xcal.CommonModeCorrection(
        sensor_size.tolist(),
        commonModeBlockSize.tolist(),
        'row',
        parallel=False)
```
%% Cell type:code id: tags:
``` python
if CM_N_iterations > 0:
    data = data.astype(float) # This conversion is needed for offset subtraction
    # Subtract Offset
    data -= constant_maps['Offset'].squeeze()
    noise_map_corrected = np.copy(constant_maps['Noise'])
    bp_offset = [np.sum(constant_maps['BadPixelsDark']==1)]
    bp_noise = [np.sum(constant_maps['BadPixelsDark']==2)]
    for it in range(0, CM_N_iterations):
        step_timer.start()
        # Mask bad pixels
        BadPixels_mask = np.squeeze(constant_maps['BadPixelsDark'] != 0)
        BadPixels_mask = np.repeat(BadPixels_mask[np.newaxis,...], data.shape[0], axis=0)
        data[BadPixels_mask] = np.nan
        # Common mode correction
        data = np.swapaxes(data, 0, -1)
        data = cmCorrection_col.correct(data)
        data = cmCorrection_row.correct(data)
        data = np.swapaxes(data, 0, -1)
        # Update noise map
        noise_map_corrected = np.nanstd(data, axis=0)[..., np.newaxis]
        # Update bad pixels map
        constant_maps['BadPixelsDark'][eval_bpidx(noise_map_corrected, badpixel_noise_sigma, sensor_size//2)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value
        bp_offset.append(np.sum(constant_maps['BadPixelsDark']==1))
        bp_noise.append(np.sum(constant_maps['BadPixelsDark']==2))
        print(bp_analysis_table(constant_maps['BadPixelsDark'], title=f'{it+1} CM correction iterations'))
        step_timer.done_step('Elapsed Time')
        print('\n')
    # Apply final bad pixels mask
    BadPixels_mask = np.broadcast_to(np.squeeze(constant_maps['BadPixelsDark'] != 0), data.shape)
    data[BadPixels_mask] = np.nan
    it = np.arange(0, CM_N_iterations+1)
    plt.figure()
    plt.plot(it, np.sum((bp_offset, bp_noise), axis=0), c='black', ls='--', marker='o', label='Total')
    plt.plot(it, bp_noise, c='red', ls='--', marker='v', label='Noise out of threshold')
    plt.plot(it, bp_offset, c='blue', ls='--', marker='s', label='Offset out of threshold')
    plt.xticks(it)
    plt.xlabel('CM correction iteration')
    plt.ylabel('# Bad Pixels')
    plt.legend()
    plt.grid(linestyle=':')
```
%% Cell type:code id: tags:
``` python
if CM_N_iterations > 0:
    display(Markdown('## Common-Mode Corrected Bad Pixels Map\n'))
```
%% Cell type:code id: tags:
``` python
if CM_N_iterations > 0:
    # Plot final bad pixels map
    fig = xana.heatmapPlot(
        np.exp(np.nan_to_num(np.log(constant_maps['BadPixelsDark']), neginf=np.nan)).squeeze(), # convert zeros to NaN
        lut_label='Bad pixel value [ADU]', # for plotting purposes
        x_label='Column',
        y_label='Row',
        x_range=(0, sensor_size[0]),
        y_range=(0, sensor_size[1])
    )
    fig.suptitle('Final Bad Pixels Map', x=.5, y=.9, fontsize=16)
    fig.set_size_inches(h=15, w=15)
    print(bp_analysis_table(constant_maps['BadPixelsDark'], title='Final Bad Pixels Analysis'))
```
%% Cell type:markdown id: tags:
## Calibration Constants DB
Send the dark constants to the database and/or save them locally.
%% Cell type:code id: tags:
``` python
# Save constants to DB
md = None
for const_name in constant_maps.keys():
    const = getattr(Constants.ePix100, const_name)()
    const.data = constant_maps[const_name].data
    # Set the operating condition
    condition = Conditions.Dark.ePix100(
        bias_voltage=bias_voltage,
        integration_time=integration_time,
        temperature=temperature_k,
        in_vacuum=in_vacuum)
    for parm in condition.parameters:
        if parm.name == "Sensor Temperature":
            parm.lower_deviation = temp_limits
            parm.upper_deviation = temp_limits
    # Get physical detector unit
    db_module = get_pdu_from_db(
        karabo_id=karabo_id,
        karabo_da=karabo_da,
        constant=const,
        condition=condition,
        cal_db_interface=cal_db_interface,
        snapshot_at=creation_time)[0]
    # Inject or save calibration constants
    if db_output:
        md = send_to_db(
            db_module=db_module,
            karabo_id=karabo_id,
            constant=const,
            condition=condition,
            file_loc=file_loc,
            report_path=report,
            cal_db_interface=cal_db_interface,
            creation_time=creation_time,
            timeout=cal_db_timeout
        )
    if local_output:
        md = save_const_to_h5(
            db_module=db_module,
            karabo_id=karabo_id,
            constant=const,
            condition=condition,
            data=const.data,
            file_loc=file_loc,
            report=report,
            creation_time=creation_time,
            out_folder=out_folder
        )
        print(f"Calibration constant {const_name} is stored locally at {out_folder} \n")
print("Constants parameter conditions are:\n"
      f"• Bias voltage: {bias_voltage}\n"
      f"• Integration time: {integration_time}\n"
      f"• Temperature: {temperature_k}\n"
      f"• In Vacuum: {in_vacuum}\n"
      f"• Creation time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\n")
```
%% Cell type:markdown id: tags:
# ePix100 Data Correction
Author: European XFEL Detector Group, Version: 2.0
The following notebook provides data correction of images acquired with the ePix100 detector.
The sequence of corrections applied is:
Offset --> Common Mode Noise --> Relative Gain --> Charge Sharing --> Absolute Gain.
Offset, common mode and gain corrected data is saved to /data/image/pixels in the CORR files.
If pattern classification is applied (charge sharing correction), this data will be saved to /data/image/pixels_classified, while the corresponding patterns will be saved to /data/image/patterns in the CORR files.
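For orientation, the order of operations on a single frame can be sketched as below. All names, shapes and numbers are illustrative stand-ins, not the notebook's actual correction chain, which is built from the xcal calculators and the calibration constants retrieved further down.
%% Cell type:code id: tags:
``` python
import numpy as np

# Illustrative stand-in for the correction order; dummy data and constants.
rng = np.random.default_rng(0)
raw = rng.integers(0, 2**14, size=(708, 768)).astype(np.float32)
offset = np.full(raw.shape, 1000., dtype=np.float32) # would come from the dark constants
relative_gain = np.ones(raw.shape, dtype=np.float32) # per-pixel gain map placeholder
adu_per_kev = 100. # hypothetical absolute-gain scale

d = raw - offset                            # 1. Offset
d -= np.nanmedian(d, axis=1, keepdims=True) # 2. Common mode (stand-in for block-wise xcal CM)
d *= relative_gain                          # 3. Relative gain
# 4. Charge sharing: pattern classification would recombine split events here.
d /= adu_per_kev                            # 5. Absolute gain (illustrative scale only)
```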
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/MID/202330/p900329/raw" # input folder, required
out_folder = "" # output folder, required
metadata_folder = "" # Directory containing calibration_metadata.yml when run by xfel-calibrate
sequences = [-1] # sequences to correct, set to -1 for all, range allowed
sequences_per_node = 1 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel
run = 106 # which run to read data from, required
# Parameters for accessing the raw data.
karabo_id = "MID_EXP_EPIX-1" # karabo karabo_id
karabo_da = "EPIX01" # data aggregators
db_module = "" # module id in the database
receiver_template = "RECEIVER" # detector receiver template for accessing raw data files
path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data
instrument_source_template = '{}/DET/{}:daqOutput' # instrument detector data source in h5files
# Parameters affecting writing corrected data.
chunk_size_idim = 1 # H5 chunking size of output data
limit_trains = 0 # Process only first N images, 0 - process all.
# Parameters for the calibration database.
cal_db_interface = "tcp://max-exfl-cal001:8015#8025" # calibration DB interface to use
cal_db_timeout = 300000 # timeout on caldb requests
creation_time = "" # The timestamp to use with the Calibration DB. Required format: "YYYY-MM-DD hh:mm:ss", e.g. 2019-07-04 11:02:41
# Conditions for retrieving calibration constants.
bias_voltage = 200 # bias voltage
in_vacuum = False # detector operated in vacuum
integration_time = -1 # Detector integration time, Default value -1 to use the value from the slow data.
fix_temperature = -1 # fixed temperature value in Kelvin, Default value -1 to use the value from files.
gain_photon_energy = 8.048 # Photon energy used for gain calibration
photon_energy = 0. # Photon energy to calibrate in number of photons, 0 for calibration in keV
# Flags to select type of applied corrections.
pattern_classification = True # do clustering.
relative_gain = True # Apply relative gain correction.
absolute_gain = True # Apply absolute gain correction (implies relative gain).
common_mode = True # Apply common mode correction.
# Parameters affecting applied correction.
cm_min_frac = 0.25 # No CM correction is performed if after masking the ratio of good pixels falls below this
cm_noise_sigma = 5. # CM correction noise standard deviation
split_evt_primary_threshold = 7. # primary threshold for split event correction
split_evt_secondary_threshold = 5. # secondary threshold for split event correction
split_evt_mip_threshold = 1000. # minimum ionizing particle threshold
def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):
    from xfel_calibrate.calibrate import balance_sequences as bs
    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)
```
%% Cell type:code id: tags:
``` python
import tabulate
import warnings
from logging import warning
from sys import exit
import h5py
import pasha as psh
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Latex, Markdown, display
from extra_data import RunDirectory, H5File
from extra_geom import Epix100Geometry
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pathlib import Path
import cal_tools.restful_config as rest_cfg
from XFELDetAna import xfelpycaltools as xcal
from cal_tools.calcat_interface import EPIX100_CalibrationData
from cal_tools.epix100 import epix100lib
from cal_tools.files import DataFile
from cal_tools.restful_config import restful_config
from cal_tools.tools import (
    calcat_creation_time,
    write_constants_fragment,
)
from cal_tools.step_timing import StepTimer
warnings.filterwarnings('ignore')
prettyPlotting = True
%matplotlib inline
```
%% Cell type:code id: tags:
``` python
x = 708 # rows of the ePix100
y = 768 # columns of the ePix100
if absolute_gain:
    relative_gain = True
plot_unit = 'ADU'
```
%% Cell type:code id: tags:
``` python
prop_str = in_folder[in_folder.find('/p')+1:in_folder.find('/p')+8]
in_folder = Path(in_folder)
out_folder = Path(out_folder)
out_folder.mkdir(parents=True, exist_ok=True)
run_folder = in_folder / f"r{run:04d}"
instrument_src = instrument_source_template.format(
    karabo_id, receiver_template)
print(f"Correcting run: {run_folder}")
print(f"Instrument H5File source: {instrument_src}")
print(f"Data corrected files are stored at: {out_folder}")
```
%% Cell type:code id: tags:
``` python
creation_time = calcat_creation_time(in_folder, run, creation_time)
print(f"Using {creation_time.isoformat()} as creation time")
```
%% Cell type:code id: tags:
``` python
run_dc = RunDirectory(run_folder, _use_voview=False)
seq_files = [Path(f.filename) for f in run_dc.select(f"*{karabo_id}*").files]
# If a set of sequences is requested to correct,
# adapt the seq_files list.
if sequences != [-1]:
    seq_files = [f for f in seq_files if any(f.match(f"*-S{s:05d}.h5") for s in sequences)]
if not len(seq_files):
    raise IndexError("No sequence files available for the selected sequences.")
print(f"Processing a total of {len(seq_files)} sequence files")
```
%% Cell type:code id: tags:
``` python
step_timer = StepTimer()
```
%% Cell type:code id: tags:
``` python
step_timer.start()
sensorSize = [x, y]
# Sensor area will be analysed according to blocksize
blockSize = [sensorSize[0]//2, sensorSize[1]//2]
xcal.defaultBlockSize = blockSize
memoryCells = 1 # ePix100 has no memory cells
run_parallel = False
# Read control data.
ctrl_data = epix100lib.epix100Ctrl(
    run_dc=run_dc,
    instrument_src=instrument_src,
    ctrl_src=f"{karabo_id}/DET/CONTROL",
)
if integration_time < 0:
    integration_time = ctrl_data.get_integration_time()
    integration_time_str_add = ""
else:
    integration_time_str_add = "(manual input)"
if fix_temperature < 0:
    temperature = ctrl_data.get_temprature()
    temperature_k = temperature + 273.15
    temp_str_add = ""
else:
    temperature_k = fix_temperature
    temperature = fix_temperature - 273.15
    temp_str_add = "(manual input)"
print(f"Bias voltage is {bias_voltage} V")
print(f"Detector integration time is set to {integration_time} \u03BCs {integration_time_str_add}")
print(f"Mean temperature: {temperature:0.2f}°C / {temperature_k:0.2f} K {temp_str_add}")
print(f"Operated in vacuum: {in_vacuum}")
```
%% Cell type:code id: tags:
``` python
# Table of sequence files to process
table = [(k, f) for k, f in enumerate(seq_files)]
if len(table):
    md = display(Latex(tabulate.tabulate(
        table,
        tablefmt='latex',
        headers=["#", "file"]
    )))
```
%% Cell type:markdown id: tags:
## Retrieving calibration constants
As a first step, the dark constants (offset and noise maps) have to be loaded. If relative gain correction is enabled, the relative gain map is retrieved as well.
%% Cell type:code id: tags:
``` python
constant_names = ["OffsetEPix100", "NoiseEPix100"]
if relative_gain:
    constant_names += ["RelativeGainEPix100"]

epix_cal = EPIX100_CalibrationData(
    detector_name=karabo_id,
    sensor_bias_voltage=bias_voltage,
    integration_time=integration_time,
    sensor_temperature=temperature_k,
    in_vacuum=in_vacuum,
    source_energy=gain_photon_energy,
    event_at=creation_time,
    client=rest_cfg.calibration_client(),
)
const_metadata = epix_cal.metadata(calibrations=constant_names)

# Display timestamps of the retrieved calibration constants.
epix_cal.display_markdown_retrieved_constants(metadata=const_metadata)

# Load the constant data from files.
const_data = epix_cal.ndarray_map(metadata=const_metadata)[karabo_da]

# Validate the constants' availability and raise/warn accordingly.
missing_dark_constants = {"OffsetEPix100", "NoiseEPix100"} - set(const_data)
if missing_dark_constants:
    raise ValueError(
        f"Dark constants {missing_dark_constants} are not available to correct {karabo_da}. "
        "No correction is performed!")

if relative_gain and "RelativeGainEPix100" not in const_data.keys():
    warning("RelativeGainEPix100 is not found in the calibration database.")
    relative_gain = False
    absolute_gain = False
```
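%% Cell type:markdown id: tags:
As an optional sanity check, the shapes and dtypes of the retrieved constants can be printed before they are used. This is a hypothetical inspection cell added for illustration; it only assumes that `const_data` is a dictionary of NumPy arrays, as produced above.
%% Cell type:code id: tags:
``` python
# Optional: inspect the retrieved constants (shapes should match the sensor geometry).
for name, arr in const_data.items():
    print(f"{name}: shape={arr.shape}, dtype={arr.dtype}")
```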
%% Cell type:code id: tags:
``` python
# Record constant details in YAML metadata.
write_constants_fragment(
    out_folder=(metadata_folder or out_folder),
    det_metadata=const_metadata,
    caldb_root=epix_cal.caldb_root,
)
```
%% Cell type:code id: tags:
``` python
# Initializing some parameters.
hscale = 1
stats = True
plot_unit = 'ADU'  # default plotting unit; switched to keV/photons below if absolute gain is applied
bins = np.arange(-50, 1000)
hist = {'O': 0}  # dictionary to store histograms
```
%% Cell type:code id: tags:
``` python
if common_mode:
    commonModeBlockSize = [x//2, y//8]
    cmCorrectionB = xcal.CommonModeCorrection(
        shape=sensorSize,
        blockSize=commonModeBlockSize,
        orientation='block',
        nCells=memoryCells,
        noiseMap=const_data['NoiseEPix100'],
        runParallel=run_parallel,
        parallel=run_parallel,
        stats=stats,
        minFrac=cm_min_frac,
        noiseSigma=cm_noise_sigma,
    )
    cmCorrectionR = xcal.CommonModeCorrection(
        shape=sensorSize,
        blockSize=commonModeBlockSize,
        orientation='row',
        nCells=memoryCells,
        noiseMap=const_data['NoiseEPix100'],
        runParallel=run_parallel,
        parallel=run_parallel,
        stats=stats,
        minFrac=cm_min_frac,
        noiseSigma=cm_noise_sigma,
    )
    cmCorrectionC = xcal.CommonModeCorrection(
        shape=sensorSize,
        blockSize=commonModeBlockSize,
        orientation='col',
        nCells=memoryCells,
        noiseMap=const_data['NoiseEPix100'],
        runParallel=run_parallel,
        parallel=run_parallel,
        stats=stats,
        minFrac=cm_min_frac,
        noiseSigma=cm_noise_sigma,
    )
    hist['CM'] = 0
```
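%% Cell type:markdown id: tags:
For reference, the effect of the row-oriented corrector above can be approximated by a plain median subtraction along each row. The sketch below is a simplified, standalone NumPy illustration only; the actual `xcal.CommonModeCorrection` additionally uses the noise map, `minFrac` and `noiseSigma` to exclude signal-like pixels from the estimate, so this is not the exact algorithm.
%% Cell type:code id: tags:
``` python
import numpy as np

def approx_row_common_mode(frame):
    """Rough approximation of a row-wise common mode correction:
    subtract the median of each row from that row."""
    return frame - np.median(frame, axis=1, keepdims=True)

# Dummy offset-corrected frame, just to show the call:
dummy_frame = np.random.normal(0, 5, size=(32, 32)).astype(np.float32)
cm_corrected = approx_row_common_mode(dummy_frame)
print(cm_corrected.std(axis=1).mean())  # residual spread per row after subtraction
```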
%% Cell type:code id: tags:
``` python
if relative_gain:
    # gain constant is given by the mode of the gain map
    # because all bad pixels are masked using this value
    _vals, _counts = np.unique(const_data["RelativeGainEPix100"], return_counts=True)
    gain_cnst = _vals[np.argmax(_counts)]
    gainCorrection = xcal.RelativeGainCorrection(
        sensorSize,
        gain_cnst/const_data["RelativeGainEPix100"][..., None],
        nCells=memoryCells,
        parallel=run_parallel,
        blockSize=blockSize,
        gains=None,
    )
    hist['RG'] = 0

    if absolute_gain:
        hscale = gain_cnst
        plot_unit = 'keV'
        if photon_energy > 0:
            plot_unit = r'$\gamma$'
            hscale /= photon_energy
        hist['AG'] = 0
```
%% Cell type:code id: tags:
``` python
if pattern_classification:
    patternClassifier = xcal.PatternClassifier(
        [x, y],
        const_data["NoiseEPix100"],
        split_evt_primary_threshold,
        split_evt_secondary_threshold,
        split_evt_mip_threshold,
        tagFirstSingles=0,
        nCells=memoryCells,
        allowElongated=False,
        blockSize=[x, y],
        parallel=run_parallel,
    )
    hist['CS'] = 0
    hist['S'] = 0
```
%% Cell type:markdown id: tags:
## Applying corrections
%% Cell type:code id: tags:
``` python
def correct_train(wid, index, tid, d):
    d = d[..., np.newaxis].astype(np.float32)
    d = np.compress(
        np.any(d > 0, axis=(0, 1)), d, axis=2)

    # Offset correction.
    d -= const_data["OffsetEPix100"]
    hist['O'] += np.histogram(d, bins=bins)[0]

    # Common Mode correction.
    if common_mode:
        # Block CM
        d = cmCorrectionB.correct(d)
        # Row CM
        d = cmCorrectionR.correct(d)
        # Col CM
        d = cmCorrectionC.correct(d)
        hist['CM'] += np.histogram(d, bins=bins)[0]

    # Relative gain correction.
    if relative_gain:
        d = gainCorrection.correct(d)
        hist['RG'] += np.histogram(d, bins=bins)[0]

    """The gain correction currently applies
    an absolute correction (not a relative correction,
    as implied by the name);
    it changes the scale (the unit of measurement)
    of the data from ADU to either keV or number of photons.
    But the pattern classification relies on comparing
    data with the NoiseEPix100 map, which is still in ADU.

    The best solution is to do a relative gain
    correction first and apply the global absolute
    gain to the data at the end, after clustering.
    """

    if pattern_classification:
        d_clu, patterns = patternClassifier.classify(d)
        d_clu[d_clu < (split_evt_primary_threshold*const_data["NoiseEPix100"])] = 0
        data_clu[index, ...] = np.squeeze(d_clu)
        data_patterns[index, ...] = np.squeeze(patterns)
        hist['CS'] += np.histogram(d_clu, bins=bins)[0]
        d_sing = d_clu[patterns == 100]  # pattern 100 corresponds to single photon events
        if len(d_sing):
            hist['S'] += np.histogram(d_sing, bins=bins)[0]

    # Absolute gain correction:
    # changes data from ADU to keV (or number of photons).
    if absolute_gain:
        d = d * gain_cnst
        if photon_energy > 0:
            d /= photon_energy
        hist['AG'] += np.histogram(d, bins=bins)[0]
        if pattern_classification:
            # Rescale the classified data as well.
            d_clu = d_clu * gain_cnst
            if photon_energy > 0:
                d_clu /= photon_energy
            data_clu[index, ...] = np.squeeze(d_clu)

    data[index, ...] = np.squeeze(d)
```
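%% Cell type:markdown id: tags:
The note inside `correct_train` points out that the gain correction changes the data scale before pattern classification, while the classification thresholds are defined in ADU. A minimal, hypothetical sketch of the suggested ordering is shown below; all names and values are dummies introduced for illustration, not the notebook's variables. The idea is to keep everything in ADU until after clustering and apply the absolute scale last.
%% Cell type:code id: tags:
``` python
import numpy as np

# Dummy stand-ins, only to illustrate the ordering (not the notebook's data):
rng = np.random.default_rng(0)
frame = rng.normal(100, 5, size=(16, 16)).astype(np.float32)  # raw frame in ADU
offset_map = np.full_like(frame, 100.0)
rel_gain_map = np.full_like(frame, 1.0)
gain_cnst = 0.01       # dummy ADU -> keV factor
photon_energy = 0.0    # 0 means keep keV instead of photon counts

frame = frame - offset_map        # 1. offset correction (ADU)
frame = frame / rel_gain_map      # 2. purely relative gain, still in ADU
# 3. pattern classification would happen here, with thresholds in ADU
frame = frame * gain_cnst         # 4. absolute gain at the very end (ADU -> keV)
if photon_energy > 0:
    frame = frame / photon_energy  # optional: keV -> number of photons
```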
%% Cell type:code id: tags:
``` python
# 10 workers chosen after testing 1 to 71 parallel threads.
context = psh.context.ThreadContext(num_workers=10)
```
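%% Cell type:markdown id: tags:
`pasha` distributes the per-train work of `correct_train` across this thread context. The cell below is a minimal sketch of the same mapping pattern on a plain NumPy array, added only for illustration; when mapping over a plain array the kernel receives `(worker_id, index, value)`, whereas for the `extra_data` key data used in this notebook it additionally receives the train ID (as in `correct_train`). Treat the exact signatures as an assumption based on that usage.
%% Cell type:code id: tags:
``` python
import numpy as np
import pasha as psh

demo_ctx = psh.context.ThreadContext(num_workers=2)
squares = demo_ctx.alloc(shape=(5,), dtype=np.float64)

def square_kernel(wid, index, value):
    # Each call handles one element; results go into the shared output array.
    squares[index] = value ** 2

demo_ctx.map(square_kernel, np.arange(5, dtype=np.float64))
print(squares)  # expected: [ 0.  1.  4.  9. 16.]
```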
%% Cell type:code id: tags:
``` python
empty_seq = 0
for f in seq_files:
    seq_dc = H5File(f)
    # Save corrected data in an output file with the name
    # of the corresponding raw sequence file.
    out_file = out_folder / f.name.replace("RAW", "CORR")

    # Data shape in seq_dc excluding trains with empty images.
    ishape = seq_dc[instrument_src, "data.image.pixels"].shape
    corr_ntrains = ishape[0]
    all_train_ids = seq_dc.train_ids

    # Raise a WARNING if this sequence has no trains to correct.
    # Otherwise, print the number of trains with no data.
    if corr_ntrains == 0:
        warning(f"No trains to correct for {f.name}: "
                "Skipping the processing of this file.")
        empty_seq += 1
        continue
    elif len(all_train_ids) != corr_ntrains:
        print(f"{f.name} has {len(all_train_ids) - corr_ntrains} trains with missing data.")

    # This parameter is only used for testing.
    if limit_trains > 0:
        print(f"\nCorrected trains are limited to: {limit_trains} trains")
        corr_ntrains = min(corr_ntrains, limit_trains)

    oshape = (corr_ntrains, *ishape[1:])
    data = context.alloc(shape=oshape, dtype=np.float32)
    if pattern_classification:
        data_clu = context.alloc(shape=oshape, dtype=np.float32)
        data_patterns = context.alloc(shape=oshape, dtype=np.int32)

    step_timer.start()  # Correct data.

    # Overwrite seq_dc after eliminating empty trains and/or limiting the number of images.
    slow_data_dc = seq_dc.select(instrument_src, require_all=True).select_trains(np.s_[:corr_ntrains])
    seq_dc = seq_dc.select(
        instrument_src, "*", require_all=True).select_trains(np.s_[:corr_ntrains])

    pixel_data = seq_dc[instrument_src, "data.image.pixels"]
    context.map(correct_train, pixel_data)
    step_timer.done_step(f'Correcting {corr_ntrains} trains.')

    step_timer.start()  # Write corrected data.

    # Create CORR files and add corrected data sections.
    image_counts = seq_dc[instrument_src, "data.image.pixels"].data_counts(labelled=False)

    # Write corrected data.
    with DataFile(out_file, "w") as ofile:
        dataset_chunk = ((chunk_size_idim,) + oshape[1:])  # e.g. (1, pixels_x, pixels_y)

        # Create INDEX datasets.
        ofile.create_index(seq_dc.train_ids, from_file=seq_dc.files[0])
        # Create METADATA datasets.
        ofile.create_metadata(
            like=seq_dc,
            sequence=seq_dc.run_metadata()["sequenceNumber"],
            instrument_channels=(f'{instrument_src}/data',)
        )
        # Create INSTRUMENT section to later add corrected datasets.
        outp_source = ofile.create_instrument_source(instrument_src)

        # Create count/first datasets at INDEX source.
        outp_source.create_index(data=image_counts)

        image_raw_fields = [  # /data/image/
            "binning", "bitsPerPixel", "dimTypes", "dims",
            "encoding", "flipX", "flipY", "roiOffsets", "rotation",
        ]
        for field in image_raw_fields:
            field_arr = seq_dc[instrument_src, f"data.image.{field}"].ndarray()
            outp_source.create_key(
                f"data.image.{field}", data=field_arr,
                chunks=(chunk_size_idim, *field_arr.shape[1:]))

        # Add the main corrected `data.image.pixels` dataset and store corrected data.
        outp_source.create_key(
            "data.image.pixels", data=data, chunks=dataset_chunk)
        outp_source.create_key(
            "data.trainId", data=seq_dc.train_ids, chunks=min(50, len(seq_dc.train_ids)))
        outp_source.create_key(
            "data.pulsenId", data=list(seq_dc[instrument_src]['data.pulseId'].ndarray().squeeze()), chunks=min(50, len(seq_dc.train_ids)))

        if pattern_classification:
            # Add datasets for the classified (charge-sharing corrected) pixels and patterns.
            outp_source.create_key(
                "data.image.pixels_classified", data=data_clu, chunks=dataset_chunk)
            outp_source.create_key(
                "data.image.patterns", data=data_patterns, chunks=dataset_chunk)

    step_timer.done_step('Storing data.')

if empty_seq == len(seq_files):
    warning("No valid trains for RAW data to correct.")
    exit(0)
```
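%% Cell type:markdown id: tags:
As a quick, optional spot check, one of the freshly written CORR files can be opened again with `extra_data` to confirm that the corrected pixel data are present with the expected shape. This hypothetical cell assumes the last sequence file in the loop above was not skipped, so that `out_file` points at a file that was actually written.
%% Cell type:code id: tags:
``` python
from extra_data import H5File  # already imported above; repeated here for clarity

corr_dc = H5File(out_file)
corr_pixels = corr_dc[instrument_src, "data.image.pixels"].ndarray()
print(f"{out_file.name}: corrected pixel data {corr_pixels.shape}, {corr_pixels.dtype}")
```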
%% Cell type:markdown id: tags:
## Plot Histograms
%% Cell type:code id: tags:
``` python
bins_ADU = bins[:-1] + np.diff(bins)[0]/2
bins_keV = bins_ADU*hscale
```
%% Cell type:code id: tags:
``` python
# Histogram in ADU
plt.figure(figsize=(12, 8))
plt.plot(bins_ADU, hist['O'], label='Offset corr')
if common_mode:
    plt.plot(bins_ADU, hist['CM'], label='CM corr')
if relative_gain:
    plt.plot(bins_ADU, hist['RG'], label='Relative Gain corr')
if pattern_classification:
    plt.plot(bins_ADU[bins_ADU > 10], hist['CS'][bins_ADU > 10], label='Charge Sharing corr')
    if np.any(hist['S']):
        plt.plot(bins_ADU, hist['S'], label='Singles')

xtick_step = 50
plt.xlim(bins[0], bins[-1] + 1)
plt.xticks(np.arange(bins[0], bins[-1] + 2, xtick_step))
plt.xlabel('ADU', fontsize=12)
plt.yscale('log')
plt.title(f'{karabo_id} | {prop_str}, r{run}', fontsize=14, fontweight='bold')
plt.legend(fontsize=12)
plt.grid(ls=':')
```
%% Cell type:code id: tags:
``` python
# Histogram in keV/number of photons
if absolute_gain:
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    plt.figure(figsize=(12, 8))
    if relative_gain:
        plt.plot(bins_keV, hist['RG'], label='Absolute Gain corr', c=colors[2])
    if pattern_classification:
        plt.plot(bins_keV[bins_keV > .5], hist['CS'][bins_keV > .5], label='Charge Sharing corr', c=colors[3])
        if np.any(hist['S']):
            plt.plot(bins_keV[bins_keV > .5], hist['S'][bins_keV > .5], label='Singles', c=colors[4])

    if photon_energy == 0:  # if keV instead of #photons
        xtick_step = 5
        plt.xlim(left=-2)
        plt.xticks(np.arange(0, plt.gca().get_xlim()[1], xtick_step))

    plt.xlabel(plot_unit, fontsize=12)
    plt.yscale('log')
    plt.title(f'{karabo_id} | {prop_str}, r{run}', fontsize=14, fontweight='bold')
    plt.legend(fontsize=12)
    plt.grid(ls=':')
```
%% Cell type:markdown id: tags:
## Mean Image of the corrected data
%% Cell type:code id: tags:
``` python
geom = Epix100Geometry.from_relative_positions(top=[386.5, 364.5, 0.], bottom=[386.5, -12.5, 0.])

if pattern_classification:
    plt.subplots(1, 2, figsize=(18, 18))
    ax = plt.subplot(1, 2, 1)
    ax.set_title('Before CS correction', fontsize=12, fontweight='bold');
else:
    plt.subplots(1, 1, figsize=(9, 9))
    ax = plt.subplot(1, 1, 1)
    ax.set_title(f'{karabo_id} | {prop_str}, r{run} | Average of {data.shape[0]} trains', fontsize=12, fontweight='bold');

# Average image before charge sharing correction
divider = make_axes_locatable(ax)
cax = divider.append_axes('bottom', size='5%', pad=0.5)
image = data.mean(axis=0)
vmin = max(image.mean() - 2*image.std(), 0)
vmax = image.mean() + 3*image.std()
geom.plot_data(image,
               ax=ax,
               colorbar={'cax': cax, 'label': plot_unit, 'orientation': 'horizontal'},
               origin='upper',
               vmin=vmin,
               vmax=vmax)

# Average image after charge sharing correction
if pattern_classification:
    ax = plt.subplot(1, 2, 2)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('bottom', size='5%', pad=0.5)
    image = data_clu.mean(axis=0)
    geom.plot_data(image,
                   ax=ax,
                   colorbar={'cax': cax, 'label': plot_unit, 'orientation': 'horizontal'},
                   origin='upper',
                   vmin=vmin,
                   vmax=vmax)
    ax.set_title('After CS correction', fontsize=12, fontweight='bold');
    plt.suptitle(f'{karabo_id} | {prop_str}, r{run} | Average of {data.shape[0]} trains', fontsize=14, fontweight='bold', y=.72);
```
%% Cell type:markdown id: tags:
## Single Shot of the corrected data
%% Cell type:code id: tags:
``` python
train_idx = -1

if pattern_classification:
    plt.subplots(1, 2, figsize=(18, 18))
    ax = plt.subplot(1, 2, 1)
    ax.set_title('Before CS correction', fontsize=12, fontweight='bold');
else:
    plt.subplots(1, 1, figsize=(9, 9))
    ax = plt.subplot(1, 1, 1)
    ax.set_title(f'{karabo_id} | {prop_str}, r{run} | Single frame', fontsize=12, fontweight='bold');

# Single frame before charge sharing correction
divider = make_axes_locatable(ax)
cax = divider.append_axes('bottom', size='5%', pad=0.5)
image = data[train_idx]
vmin = max(image.mean() - 2*image.std(), 0)
vmax = image.mean() + 3*image.std()
geom.plot_data(image,
               ax=ax,
               colorbar={'cax': cax, 'label': plot_unit, 'orientation': 'horizontal'},
               origin='upper',
               vmin=vmin,
               vmax=vmax)

# Single frame after charge sharing correction
if pattern_classification:
    ax = plt.subplot(1, 2, 2)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes('bottom', size='5%', pad=0.5)
    image = data_clu[train_idx]
    geom.plot_data(image,
                   ax=ax,
                   colorbar={'cax': cax, 'label': plot_unit, 'orientation': 'horizontal'},
                   origin='upper',
                   vmin=vmin,
                   vmax=vmax)
    ax.set_title('After CS correction', fontsize=12, fontweight='bold');
    plt.suptitle(f'{karabo_id} | {prop_str}, r{run} | Single frame', fontsize=14, fontweight='bold', y=.72);
```
@@ -43,5 +43,4 @@ class epix100Ctrl():
        else:
            return self.run_dc[
                self.instrument_src.split(':daqOutput')[0], 'slowdata.backTemp.value'].as_single_value(
                    reduce_by='mean', atol=1)
\ No newline at end of file