Commit 691bbf05 authored by Karim Ahmed
Merge branch 'fix/multiple_db_ports_and_no_relgain' into 'master'

empty relgain and random db port

See merge request detectors/pycalibration!315
parents a46a6a01 96812e40
%% Cell type:markdown id: tags:
# pnCCD Data Correction #
Authors: DET Group, modified by Kiana Setoodehnia in March 2020 - Version 2.0
The following notebook provides offset, relative gain, common mode, split events, and pattern classification corrections of images acquired with the pnCCD. This notebook *does not* yet correct for charge transfer inefficiency.
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/SQS/201930/p900075/raw" # input folder
out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/pnccd' # output folder
run = 365 # which run to read data from
sequences = [-1] # sequences to correct, set to -1 for all, range allowed
karabo_da = 'PNCCD01' # data aggregators
karabo_id = "SQS_NQS_PNCCD1MP" # karabo prefix of PNCCD devices
receiver_id = "PNCCD_FMT-0" # inset for receiver devices
path_template = 'RAW-R{:04d}-PNCCD01-S{{:05d}}.h5' # the template to use to access data
path_template_seqs = "{}/r{:04d}/*PNCCD01-S*.h5"
h5path = '/INSTRUMENT/{}/CAL/{}:output/data/' # path to data in the HDF5 file
overwrite = True # set to True to allow existing output to be overwritten
use_dir_creation_date = True # required to obtain creation time of the run
number_dark_frames = 0 # number of images to be used, if set to 0 all available images are used
chunk_size_idim = 1 # H5 chunking size of output data
cluster_profile = "noDB" # ipcluster profile to use
cpuCores = 8 # number of CPU cores used by the parallel calculators
commonModeBlockSize = [512, 512] # size of the detector in pixels for common mode calculations
commonModeAxis = 0 # axis along which common mode will be calculated, 0 = row, and 1 = column
split_evt_primary_threshold = 5. # primary threshold for split event classification in terms of n sigma noise
split_evt_secondary_threshold = 3. # secondary threshold for split event classification in terms of n sigma noise
split_evt_mip_threshold = 1000. # MIP threshold for event classification
sequences_per_node = 1 # number of sequences to be processed per node
limit_images = 0 # limit the number of images to be processed; set to 0 to process all available images
seq_num = 0 # sequence number for which the last plot at the end of the notebook is plotted
# pnCCD parameters:
fix_temperature = 233. # fixed detector temperature in Kelvin
gain = 1 # the detector's gain setting; only the values 1 and 64 are handled by this notebook
bias_voltage = 300 # detector bias voltage
integration_time = 70 # detector integration time
photon_energy = 1.6 # Al fluorescence in keV
cal_db_interface = "tcp://max-exfl016:8015" # calibration DB interface to use
cal_db_timeout = 300000 # timeout on caldb requests
creation_time = "" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.ms e.g. 2019-07-04 11:02:41.00
only_offset = False # Only apply offset correction
common_mode = True # Apply common mode correction
relgain = True # Apply relative gain correction
cti = False # Apply charge transfer inefficiency correction (not implemented, yet)
do_pattern_classification = True # classify split events
```
%% Cell type:code id: tags:
``` python
# Here the hierarchy and dependencies of the correction booleans are defined:
corr_bools = {}
corr_bools["only_offset"] = only_offset

# Don't apply any other corrections if only_offset is requested:
if not only_offset:
    corr_bools["relgain"] = relgain
    corr_bools["cti"] = cti
    corr_bools["common_mode"] = common_mode
    corr_bools["pattern_class"] = do_pattern_classification
```
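%% Cell type:markdown id: tags:
As a quick sanity check, the hierarchy above can be inspected directly: with `only_offset = True` the dictionary contains no further correction flags, so every later `corr_bools.get(...)` guard evaluates to `False`. A minimal sketch:
%% Cell type:code id: tags:
``` python
# List the corrections that will actually run for the current flags.
# corr_bools.get(key) returns None for keys that were never set, so disabled
# and undefined corrections are skipped uniformly in the cells below.
enabled = [name for name, flag in corr_bools.items() if flag]
print(f"Enabled corrections: {enabled}")
```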
%% Cell type:code id: tags:
``` python
import warnings
warnings.filterwarnings('ignore')
import h5py
import time
import copy
import os
import glob
import datetime
from datetime import timedelta
from prettytable import PrettyTable
import numpy as np
import matplotlib.pyplot as plt
from iminuit import Minuit
from IPython.display import display, Markdown
import XFELDetAna.xfelprofiler as xprof
profiler = xprof.Profiler()
profiler.disable()
from XFELDetAna.util import env
env.iprofile = cluster_profile
from XFELDetAna import xfelpycaltools as xcal
from XFELDetAna import xfelpyanatools as xana
from XFELDetAna.plotting.util import prettyPlotting
prettyPlotting=True
from XFELDetAna.xfelreaders import ChunkReader
from XFELDetAna.detectors.fastccd import readerh5 as fastccdreaderh5
from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions
from iCalibrationDB.detectors import DetectorTypes
from cal_tools.tools import get_dir_creation_date, get_random_db_interface, get_constant_from_db_and_time
%matplotlib inline
if sequences[0] == -1:
    sequences = None
```
%% Cell type:code id: tags:
``` python
# Each xcal.HistogramCalculator requires a total number of bins and a binning range. We define these using a
# dictionary:
# For all xcal histograms:
if gain == 1:
    Hist_Bin_Dict = {
        "bins": 70000,  # number of bins
        "bin_range": [0, 70000]
    }
    # For the numpy histograms on the last cell of the notebook:
    Event_Bin_Dict = {
        "event_bins": 1000,  # number of bins
        "b_range": [0, 50000]  # bin range
    }
elif gain == 64:
    # For all xcal histograms:
    Hist_Bin_Dict = {
        "bins": 25000,  # number of bins
        "bin_range": [0, 25000]
    }
    # For the numpy histograms on the last cell of the notebook:
    Event_Bin_Dict = {
        "event_bins": 1000,  # number of bins
        "b_range": [0, 3000]  # bin range
    }

bins = Hist_Bin_Dict["bins"]
bin_range = Hist_Bin_Dict["bin_range"]
event_bins = Event_Bin_Dict["event_bins"]
b_range = Event_Bin_Dict["b_range"]

# On the singles spectrum (shown later in this notebook), the ADU values corresponding to the boundaries
# of the first peak region are used as cti_limit_low and cti_limit_high:
if gain == 1:
    cti_limit_low = 3000  # lower limit of cti
    cti_limit_high = 10000  # higher limit of cti
elif gain == 64:
    cti_limit_low = 50
    cti_limit_high = 170
```
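%% Cell type:markdown id: tags:
With these settings each xcal histogram bin spans exactly 1 ADU (70000 bins over [0, 70000] at gain 1, and 25000 bins over [0, 25000] at gain 64); the spectra plotted later in this notebook rely on this 1 ADU per bin convention. A quick check:
%% Cell type:code id: tags:
``` python
# Bin width check: (range span) / (number of bins) = 1 ADU per bin.
bin_width = (bin_range[1] - bin_range[0]) / bins
print(f"xcal histogram bin width: {bin_width} ADU")
```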
%% Cell type:code id: tags:
``` python
# Calibration Database Settings, and Some Initial Run Parameters & Paths:
display(Markdown('### Initial Settings and Paths'))
pixels_x = 1024 # rows of pnCCD in pixels
pixels_y = 1024 # columns of pnCCD in pixels
print(f"pnCCD size is: {pixels_x}x{pixels_y} pixels.")
print(f'Calibration database interface selected: {cal_db_interface}')
proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]
print(f'Proposal: {proposal}, Run: {run}')
# Paths to the data:
ped_dir = "{}/r{:04d}".format(in_folder, run)
fp_name = path_template.format(run, karabo_da)
fp_path = '{}/{}'.format(ped_dir, fp_name)
h5path = h5path.format(karabo_id, receiver_id)
print("HDF5 path to data: {}\n".format(h5path))
# Run's creation time:
if creation_time:
    try:
        creation_time = datetime.datetime.strptime(creation_time, '%Y-%m-%d %H:%M:%S.%f')
    except Exception as e:
        print(f"creation_time value error: {e}. "
              "Use the format YYYY-MM-DD HR:MN:SC.ms, e.g. 2019-07-04 11:02:41.00\n")
        creation_time = None
        print("Given creation time won't be used.")
else:
    creation_time = None

if not creation_time and use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run)

print(f"Creation time: {creation_time}")

# Reading all sequences of the run:
file_list = []
total_sequences = 0
fsequences = []

if sequences is None:
    file_list = glob.glob(fp_path.format(0).replace('00000', '*'))
    file_list = sorted(file_list, key=lambda x: (len(x), x))
    total_sequences = len(file_list)
    fsequences = range(total_sequences)
else:
    for seq in sequences:
        abs_entry = fp_path.format(seq)
        if os.path.isfile(abs_entry):
            file_list.append(abs_entry)
            total_sequences += 1
            fsequences.append(seq)

sequences = fsequences
print(f"This run has a total number of {total_sequences} sequences.\n")
```
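%% Cell type:markdown id: tags:
The double braces in `path_template` defer the sequence-number substitution: the first `format` call fills in the run number (the extra `karabo_da` argument is ignored, as the template has only one positional slot) and leaves a literal `{:05d}` placeholder, which a second `format` call fills with the sequence number. A minimal illustration for the current run:
%% Cell type:code id: tags:
``` python
# Two-stage formatting of the file-name template:
print(path_template.format(run))         # e.g. 'RAW-R0365-PNCCD01-S{:05d}.h5'
print(fp_path.format(0).split('/')[-1])  # e.g. 'RAW-R0365-PNCCD01-S00000.h5'
```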
%% Cell type:code id: tags:
``` python
display(Markdown('### List of Files to be Processed'))
print("Reading data from the following files:")
for f in file_list:
    print(f)
```
%% Cell type:code id: tags:
``` python
# Sensor size and block size definitions (important for common mode and other corrections):
sensorSize = [pixels_x, pixels_y]
blockSize = [sensorSize[0]//2, sensorSize[1]//2] # sensor area will be analysed according to blockSize
xcal.defaultBlockSize = blockSize # for xcal.HistogramCalculators
memoryCells = 1 # pnCCD has 1 memory cell
```
%% Cell type:code id: tags:
``` python
# Output Folder Creation:
if os.path.exists(out_folder) and not overwrite:
    raise AttributeError("Output path exists! Exiting")
os.makedirs(out_folder, exist_ok=True)
```
%% Cell type:markdown id: tags:
As a first step, dark constants have to be retrieved from the calibration database.
%% Cell type:code id: tags:
``` python
def get_dark(db_parms, bias_voltage, integration_time, gain, fix_temperature, creation_time, run):
    # This function retrieves the dark constants associated with the run of interest:
    cal_db_interface, cal_db_timeout = db_parms
    cal_db_interface = get_random_db_interface(cal_db_interface)
    print(f'Calibration database interface: {cal_db_interface}')

    constants = {
        'Offset': None,
        'Noise': None,
        'BadPixelsDark': None,
    }
    when = {}

    try:
        condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,
                                        integration_time=integration_time,
                                        gain_setting=gain,
                                        temperature=fix_temperature,
                                        pixels_x=pixels_x,
                                        pixels_y=pixels_y)

        for const in constants.keys():
            constants[const], when[const] = \
                get_constant_from_db_and_time(Detectors.PnCCD1,
                                              getattr(Constants.CCD(DetectorTypes.pnCCD), const)(),
                                              condition,
                                              np.zeros((pixels_x, pixels_y, 1)),
                                              cal_db_interface,
                                              creation_time=creation_time)

        for parm in condition.parameters:
            if parm.name == "Sensor Temperature":
                print(f"For run {run}: dark images for offset calculation "
                      f"were taken at temperature: {parm.value:0.2f} "
                      f"K @ {when['Offset']}")
    except Exception as e:
        print(f"Error: {e}")

    return constants
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Dark Data Retrieval'))
db_parms = cal_db_interface, cal_db_timeout
constants = get_dark(db_parms, bias_voltage, integration_time, gain, fix_temperature, creation_time, run)
fig = xana.heatmapPlot(constants["Offset"][:, :, 0], x_label='Columns', y_label='Rows',
                       lut_label='Pedestal (ADU)', aspect=1,
                       x_range=(0, pixels_y), y_range=(0, pixels_x), vmax=16000,
                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                       title='Dark Offset Map')

fig = xana.heatmapPlot(constants["Noise"][:, :, 0], x_label='Columns', y_label='Rows',
                       lut_label='Common Mode Corrected Noise (ADU)',
                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x),  # vmin=-50, vmax=70000,
                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                       title='Dark Noise Map')

fig = xana.heatmapPlot(np.log2(constants["BadPixelsDark"][:, :, 0]), x_label='Columns', y_label='Rows',
                       lut_label='Bad Pixel Value (ADU)',
                       aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x),  # vmin=-50, vmax=70000,
                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                       title='Dark Bad Pixels Map')
```
%% Cell type:code id: tags:
``` python
if corr_bools.get('relgain'):
    display(Markdown('### Relative Gain Map Retrieval'))
    # Set the operating condition:
    condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,
                                           integration_time=integration_time,
                                           gain_setting=gain,
                                           temperature=fix_temperature,
                                           pixels_x=pixels_x,
                                           pixels_y=pixels_y,
                                           photon_energy=photon_energy)

    constants["RelativeGain"], relgain_time = \
        get_constant_from_db_and_time(Detectors.PnCCD1,
                                      Constants.CCD(DetectorTypes.pnCCD).RelativeGain(),
                                      condition,
                                      np.zeros((pixels_x, pixels_y)),
                                      cal_db_interface,
                                      creation_time=creation_time)
    print(f"Retrieved RelativeGain constant with creation time: {relgain_time}")

    fig = xana.heatmapPlot(constants["RelativeGain"], figsize=(8, 8), x_label='Columns', y_label='Rows',
                           lut_label='Relative Gain',
                           aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x), vmin=0.8, vmax=1.2,
                           panel_x_label='Along Rows', panel_y_label='Along Columns',
                           panel_top_low_lim=0.5, panel_top_high_lim=1.5, panel_side_low_lim=0.5,
                           panel_side_high_lim=1.5,
                           title=f'1st Relative Gain Map for pnCCD (Gain = {gain})')
```
%% Cell type:code id: tags:
``` python
#************************ Calculators ************************#
if corr_bools.get('common_mode'):
    # Common Mode Correction Calculator:
    cmCorrection = xcal.CommonModeCorrection([pixels_x, pixels_y],
                                             commonModeBlockSize,
                                             commonModeAxis,
                                             parallel=False, dType=np.float32, stride=1,
                                             noiseMap=constants["Noise"].astype(np.float32), minFrac=0)
    cmCorrection.debug()

if corr_bools.get('pattern_class'):
    # Pattern Classifier Calculator:
    # Left Hemisphere:
    patternClassifierLH = xcal.PatternClassifier([pixels_x, pixels_y//2],
                                                 constants["Noise"][:, :pixels_y//2],
                                                 split_evt_primary_threshold,
                                                 split_evt_secondary_threshold,
                                                 split_evt_mip_threshold,
                                                 tagFirstSingles=3,  # track along y-axis, left to right (see
                                                 nCells=memoryCells,  # split_event.py file in pydetlib/lib/src/
                                                 cores=cpuCores,  # XFELDetAna/algorithms)
                                                 allowElongated=False,
                                                 blockSize=[pixels_x, pixels_y//2],
                                                 runParallel=True)

    # Right Hemisphere:
    patternClassifierRH = xcal.PatternClassifier([pixels_x, pixels_y//2],
                                                 constants["Noise"][:, pixels_y//2:],
                                                 split_evt_primary_threshold,
                                                 split_evt_secondary_threshold,
                                                 split_evt_mip_threshold,
                                                 tagFirstSingles=4,  # track along y-axis, right to left
                                                 nCells=memoryCells,
                                                 cores=cpuCores,
                                                 allowElongated=False,
                                                 blockSize=[pixels_x, pixels_y//2],
                                                 runParallel=True)

    patternClassifierLH._imagesPerChunk = 500
    patternClassifierRH._imagesPerChunk = 500
    patternClassifierLH.debug()
    patternClassifierRH.debug()

    # Setting bad pixels:
    patternClassifierLH.setBadPixelMask(constants["BadPixelsDark"][:, :pixels_y//2] != 0)
    patternClassifierRH.setBadPixelMask(constants["BadPixelsDark"][:, pixels_y//2:] != 0)
```
%% Cell type:code id: tags:
``` python
#***************** Histogram Calculators ******************#
# Will contain uncorrected data:
histCalRaw = xcal.HistogramCalculator(sensorSize,
                                      bins=bins,
                                      range=bin_range,
                                      nCells=memoryCells,
                                      cores=cpuCores,
                                      blockSize=blockSize)
histCalRaw.debug()

# Will contain offset corrected data:
histCalOffsetCor = xcal.HistogramCalculator(sensorSize,
                                            bins=bins,
                                            range=bin_range,
                                            nCells=memoryCells,
                                            cores=cpuCores,
                                            blockSize=blockSize)
histCalOffsetCor.debug()

if corr_bools.get('common_mode'):
    # Will contain common mode corrected data:
    histCalCommonModeCor = xcal.HistogramCalculator(sensorSize,
                                                    bins=bins,
                                                    range=bin_range,
                                                    nCells=memoryCells,
                                                    cores=cpuCores,
                                                    blockSize=blockSize)
    histCalCommonModeCor.debug()

# Will contain split events pattern data:
histCalPcorr = xcal.HistogramCalculator(sensorSize,
                                        bins=bins,
                                        range=bin_range,
                                        nCells=memoryCells,
                                        cores=cpuCores,
                                        blockSize=blockSize)
histCalPcorr.debug()

# Will contain singles events data:
histCalPcorrS = xcal.HistogramCalculator(sensorSize,
                                         bins=bins,
                                         range=bin_range,
                                         nCells=memoryCells,
                                         cores=cpuCores,
                                         blockSize=blockSize)
histCalPcorrS.debug()

if corr_bools.get('relgain'):
    # Will contain gain corrected data:
    histCalGainCor = xcal.HistogramCalculator(sensorSize,
                                              bins=bins,
                                              range=bin_range,
                                              nCells=memoryCells,
                                              cores=cpuCores,
                                              blockSize=blockSize)
    histCalGainCor.debug()
```
%% Cell type:markdown id: tags:
Applying offset and common mode corrections to the raw data
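In essence, both corrections are simple array operations: the offset map is subtracted pixel-wise, and common mode removes a baseline shift shared by the pixels of a row (or column). Below is a minimal numpy sketch on synthetic data; the actual correction in this notebook uses `xcal.CommonModeCorrection` with the measured noise map rather than a plain median.
%% Cell type:code id: tags:
``` python
# Toy illustration on a synthetic 8x8 frame (not pnCCD data): subtract a dark
# offset, then estimate the common mode of each row as its median and remove it.
rng = np.random.default_rng(0)
toy_offset = rng.normal(1000, 5, (8, 8))           # synthetic pedestal map
toy_frame = toy_offset + rng.normal(0, 3, (8, 8))  # synthetic raw frame
toy_frame += np.arange(8)[:, None]                 # add a row-wise common mode shift
toy_frame -= toy_offset                                   # offset correction
toy_frame -= np.median(toy_frame, axis=1, keepdims=True)  # common mode correction along rows
print(f"Largest residual row mean: {np.abs(toy_frame.mean(axis=1)).max():.2f} ADU")
```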
%% Cell type:code id: tags:
``` python
def copy_and_sanitize_non_cal_data(infile, outfile, h5base):
    '''This function reads the .h5 data and writes the corrected .h5 data.'''
    if h5base.startswith("/"):
        h5base = h5base[1:]
    dont_copy = ['image']
    dont_copy = [h5base + "/{}".format(do) for do in dont_copy]

    def visitor(k, item):
        if k not in dont_copy:
            if isinstance(item, h5py.Group):
                outfile.create_group(k)
            elif isinstance(item, h5py.Dataset):
                group = str(k).split("/")
                group = "/".join(group[:-1])
                infile.copy(k, outfile[group])

    infile.visititems(visitor)
```
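%% Cell type:markdown id: tags:
The visitor above mirrors the input file's group structure and copies every dataset except those listed in `dont_copy` (here only `image`, which is replaced by the corrected datasets written in the next cell). A small self-contained check with in-memory HDF5 files; the group names are made up for illustration:
%% Cell type:code id: tags:
``` python
# Minimal sketch: 'image' is skipped, everything else is copied verbatim.
with h5py.File('src.h5', 'w', driver='core', backing_store=False) as src, \
     h5py.File('dst.h5', 'w', driver='core', backing_store=False) as dst:
    src.create_dataset('INSTRUMENT/DET/CAL/RCV:output/data/image', data=np.zeros((2, 4, 4)))
    src.create_dataset('INSTRUMENT/DET/CAL/RCV:output/data/trainId', data=np.arange(2))
    copy_and_sanitize_non_cal_data(src, dst, '/INSTRUMENT/DET/CAL/RCV:output/data')
    print(list(dst['INSTRUMENT/DET/CAL/RCV:output/data'].keys()))  # ['trainId']
```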
%% Cell type:code id: tags:
``` python
# Data corrections and event classifications happen here. Also, the corrected data are written to datasets:
uncor_mean_im = None
uncor_single_im = None
offset_mean_im = None
offset_single_im = None
cm_mean_im = None
cm_single_im = None
gain_mean_im = None
mean_im_cc = None
single_im_cc = None

offsetMap = np.squeeze(constants["Offset"])
noiseMap = np.squeeze(constants["Noise"])
badPixelMap = np.squeeze(constants["BadPixelsDark"])
if corr_bools.get('relgain'):
    relGain = constants["RelativeGain"]

for k, f in enumerate(file_list):
    with h5py.File(f, 'r', driver='core') as infile:
        out_fileb = "{}/{}".format(out_folder, f.split("/")[-1])
        out_file = out_fileb.replace("RAW", "CORR")
        data = None
        noise = None
        try:
            with h5py.File(out_file, "w") as ofile:
                copy_and_sanitize_non_cal_data(infile, ofile, h5path)
                data = infile[h5path + "/image"][()]
                nzidx = np.count_nonzero(data, axis=(1, 2))
                data = data[nzidx != 0, ...]
                if limit_images > 0:
                    data = data[:limit_images, ...]
                oshape = data.shape
                data = np.moveaxis(data, 0, 2)
                ddset = ofile.create_dataset(h5path + "/pixels",
                                             oshape,
                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                             dtype=np.float32)
                ddsetm = ofile.create_dataset(h5path + "/mask",
                                              oshape,
                                              chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                              dtype=np.uint32, compression="gzip")
                if corr_bools.get('relgain'):
                    ddsetg = ofile.create_dataset(h5path + "/gain",
                                                  oshape,
                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                                  dtype=np.float32, compression="gzip")
                data = data.astype(np.float32)

                # Creating maps for correction usage:
                offset = np.repeat(offsetMap[..., None], data.shape[2], axis=2)
                noise = np.repeat(noiseMap[..., None], data.shape[2], axis=2)
                bpix = np.repeat(badPixelMap[..., None], data.shape[2], axis=2)
                if corr_bools.get('relgain'):
                    rg = np.repeat(relGain[:, :, None], data.shape[2], axis=2)  # rg = relative gain

                # uncor_mean_im = averaged over all non-corrected images (in the first sequence only):
                if uncor_mean_im is None:
                    uncor_mean_im = np.nanmean(data, axis=2)
                    uncor_single_im = data[..., 0]  # the non-corrected image corresponding to the first frame

                histCalRaw.fill(data)  # filling histogram with raw uncorrected data

                # Masking bad pixels by setting them to np.nan so that the pattern classifier ignores them:
                data[bpix != 0] = np.nan

                data -= offset  # offset correction
                histCalOffsetCor.fill(data)  # filling histogram with offset corrected data
                ddset[...] = np.moveaxis(data, 2, 0)
                ddsetm[...] = np.moveaxis(bpix, 2, 0)
                ofile.flush()

                # offset_mean_im = averaged over all offset corrected images (in the first sequence only):
                if offset_mean_im is None:
                    offset_mean_im = np.nanmean(data, axis=2)
                    offset_single_im = data[..., 0]  # the offset corrected image corresponding to the first frame

                if corr_bools.get('relgain'):
                    data /= rg  # relative gain correction
                    histCalGainCor.fill(data)  # filling histogram with gain corrected data
                    ddsetg[...] = np.moveaxis(rg, 2, 0).astype(np.float32)

                    # gain_mean_im = averaged over gain corrected images (in the first sequence only):
                    if gain_mean_im is None:
                        gain_mean_im = np.nanmean(data, axis=2)
                        gain_single_im = data[..., 0]  # the gain corrected image corresponding to the first frame

                if corr_bools.get('pattern_class'):
                    # cm: common mode, c: classifications, p: event patterns
                    if corr_bools.get('common_mode'):
                        ddsetcm = ofile.create_dataset(h5path + "/pixels_cm",
                                                       oshape,
                                                       chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                                       dtype=np.float32)
                    ddsetc = ofile.create_dataset(h5path + "/pixels_classified",
                                                  oshape,
                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                                  dtype=np.float32, compression="gzip")
                    ddsetp = ofile.create_dataset(h5path + "/patterns",
                                                  oshape,
                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                                  dtype=np.int32, compression="gzip")

                    # The noise maps used for the cluster map calculation:
                    patternClassifierLH._noisemap = noise[:, :pixels_x//2, :]
                    patternClassifierRH._noisemap = noise[:, pixels_x//2:, :]

                    if corr_bools.get('common_mode'):
                        data = cmCorrection.correct(data.astype(np.float32),  # common mode correction
                                                    cellTable=np.zeros(data.shape[2], np.int32))
                        histCalCommonModeCor.fill(data)  # filling histogram with common mode corrected data
                        if cm_mean_im is None:
                            cm_mean_im = np.nanmean(data, axis=2)
                            cm_single_im = data[..., 0]  # the common mode corrected image of the first frame
                        ddsetcm[...] = np.moveaxis(data, 2, 0)

                    # Dividing the data into left and right hemispheres:
                    dataLH = data[:, :pixels_x//2, :]
                    dataRH = data[:, pixels_x//2:, :]

                    # Pattern classification on corrected data:
                    dataLH, patternsLH = patternClassifierLH.classify(dataLH)
                    dataRH, patternsRH = patternClassifierRH.classify(dataRH)

                    data[:, :pixels_x//2, :] = dataLH
                    data[:, pixels_x//2:, :] = dataRH

                    patterns = np.zeros(data.shape, patternsLH.dtype)
                    patterns[:, :pixels_x//2, :] = patternsLH
                    patterns[:, pixels_x//2:, :] = patternsRH

                    data[data < split_evt_primary_threshold * noise] = 0
                    ddsetc[...] = np.moveaxis(data, 2, 0)
                    ddsetp[...] = np.moveaxis(patterns, 2, 0)
                    histCalPcorr.fill(data)  # filling histogram with split events corrected data

                    data[patterns != 100] = np.nan  # discard doubles, triples, quadruples, clusters, first singles
                    histCalPcorrS.fill(data)  # filling histogram with singles events data

                    # mean_im_cc = averaged over all pattern classified corrected images
                    # (in the first sequence only):
                    if mean_im_cc is None:
                        mean_im_cc = np.nanmean(data, axis=2)
                        single_im_cc = data[..., 0]  # the final corrected image corresponding to the first frame

        except Exception as e:
            print(f"Couldn't calibrate data in {f}: {e}\n")

print("In addition to offset correction, the following corrections were performed:")
for k, v in corr_bools.items():
    if v:
        print(k)
```
%% Cell type:code id: tags:
``` python
# Histogramming the resulting spectra:
# The first _ refers to the bin edges and the second _ to statistics; we ignore both.
# If you use histCalRaw.get(cumulatative=True) and so on, cumulatative=True turns the counts arrays such as
# RawHistVals into 1D arrays instead of keeping the original shape:
RawHistVals, _, RawHistMids, _ = histCalRaw.get()
off_cor_HistVals, _, off_cor_HistMids, _ = histCalOffsetCor.get()
if corr_bools.get('common_mode'):
    cm_cor_HistVals, _, cm_HistMids, _ = histCalCommonModeCor.get()
if corr_bools.get('relgain'):
    gain_cor_HistVals, _, gain_cor_HistMids, _ = histCalGainCor.get()
split_HistVals, _, split_HistMids, _ = histCalPcorr.get()  # split events corrected
singles_HistVals, _, singles_HistMids, _ = histCalPcorrS.get()  # last S in variable names: singles events
```
%% Cell type:code id: tags:
``` python
# Saving intermediate data to disk:
np.savez(os.path.join(out_folder, 'Raw_Events.npz'), RawHistMids, RawHistVals)
np.savez(os.path.join(out_folder, 'Offset_Corrected_Events.npz'), off_cor_HistMids, off_cor_HistVals)
if corr_bools.get('common_mode'):
    np.savez(os.path.join(out_folder, 'Common_Mode_Corrected_Events.npz'), cm_HistMids, cm_cor_HistVals)
if corr_bools.get('relgain'):
    np.savez(os.path.join(out_folder, 'Gain_Corrected_Events.npz'), gain_cor_HistMids, gain_cor_HistVals)
np.savez(os.path.join(out_folder, 'Split_Events.npz'), split_HistMids, split_HistVals)
np.savez(os.path.join(out_folder, 'Singles_Events.npz'), singles_HistMids, singles_HistVals)
print("Various spectra are saved to disk in the form of histograms. Please check {}".format(out_folder))
```
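%% Cell type:markdown id: tags:
Since the arrays are passed to `np.savez` positionally, they are stored under the default keys `arr_0` (bin centres) and `arr_1` (counts). A later session can reload a spectrum like this:
%% Cell type:code id: tags:
``` python
# Reload one of the saved spectra (np.savez stores positional arrays as arr_0, arr_1):
saved = np.load(os.path.join(out_folder, 'Raw_Events.npz'))
mids, counts = saved['arr_0'], saved['arr_1']
print(mids.shape, counts.shape)
```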
%% Cell type:code id: tags:
``` python
# Depending on the gain, the ranges of data are different, so we set a few parameters such that the plots
# always look good:
if gain == 1:
    x_range = (0, 30000)
elif gain == 64:
    x_range = (0, 1000)
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Raw vs. Corrected Spectra'))
figure = [{'x': RawHistMids,
           'y': RawHistVals,
           'y_err': np.sqrt(RawHistVals[:]),
           'drawstyle': 'steps-post',
           'errorstyle': 'bars',
           'errorcoarsing': 2,
           'label': 'Uncorrected'
           },
          {'x': off_cor_HistMids,
           'y': off_cor_HistVals,
           'y_err': np.sqrt(off_cor_HistVals[:]),
           'drawstyle': 'steps-post',
           'errorstyle': 'bars',
           'errorcoarsing': 2,
           'label': 'Offset Corrected'
           }]

if corr_bools.get('common_mode'):
    figure.append({'x': cm_HistMids,
                   'y': cm_cor_HistVals,
                   'y_err': np.sqrt(cm_cor_HistVals[:]),
                   'drawstyle': 'steps-post',
                   'errorstyle': 'bars',
                   'errorcoarsing': 2,
                   'label': 'Common Mode Corrected'})

if corr_bools.get('relgain'):
    xrange = (cti_limit_low, cti_limit_high)
    figure.append({'x': gain_cor_HistMids,
                   'y': gain_cor_HistVals,
                   'y_err': np.sqrt(gain_cor_HistVals[:]),
                   'drawstyle': 'steps-post',
                   'errorstyle': 'bars',
                   'errorcoarsing': 2,
                   'label': 'Gain Corrected'})
else:
    xrange = x_range

if corr_bools.get('pattern_class'):
    figure.extend([{'x': split_HistMids,
                    'y': split_HistVals,
                    'y_err': np.sqrt(split_HistVals[:]),
                    'drawstyle': 'steps-post',
                    'errorstyle': 'bars',
                    'errorcoarsing': 2,
                    'label': 'Split Events Corrected'
                    },
                   {'x': singles_HistMids,
                    'y': singles_HistVals,
                    'y_err': np.sqrt(singles_HistVals[:]),
                    'drawstyle': 'steps-post',
                    'errorstyle': 'bars',
                    'errorcoarsing': 2,
                    'label': 'Singles Events'
                    }])
fig = xana.simplePlot(figure, aspect=1, x_label='ADU', y_label='Number of Occurrences', figsize='2col',
y_log=True, x_range=x_range, title = '1 ADU per bin is used.',
legend='top-right-frame-1col')
```
%% Cell type:code id: tags:
``` python
# This function plots pattern statistics:
def classification_plot(patternStats, hemisphere):
    print("****************** {} HEMISPHERE ******************\n"
          .format(hemisphere))
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(4, 4, 1)
    sfields = ["singles", "first singles", "clusters"]
    mfields = ["doubles", "triples", "quads"]
    relativeOccurances = []
    labels = []
    for i, f in enumerate(sfields):
        relativeOccurances.append(patternStats[f])
        labels.append(f)
    for i, f in enumerate(mfields):
        for k in range(len(patternStats[f])):
            relativeOccurances.append(patternStats[f][k])
            labels.append("{}({})".format(f, k))
    relativeOccurances = np.array(relativeOccurances, dtype=float)  # np.float is deprecated
    relativeOccurances /= np.sum(relativeOccurances)

    pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)
    ax.set_title("Pattern Occurrence")
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    a = ax.axis('equal')

    smaps = ["singlemap", "firstsinglemap", "clustermap"]
    for i, m in enumerate(smaps):
        ax = fig.add_subplot(4, 4, 2 + i)
        pmap = ax.imshow(patternStats[m], interpolation="nearest", vmax=2*np.nanmedian(patternStats[m]))
        ax.set_title(m)
        cb = fig.colorbar(pmap)

    mmaps = ["doublemap", "triplemap", "quadmap"]
    k = 0
    for i, m in enumerate(mmaps):
        for j in range(4):
            ax = fig.add_subplot(4, 4, 2 + len(smaps) + k)
            pmap = ax.imshow(patternStats[m][j], interpolation="nearest", vmax=2*np.median(patternStats[m][j]))
            ax.set_title("{}({})".format(m, j))
            cb = fig.colorbar(pmap)
            k += 1
```
%% Cell type:code id: tags:
``` python
# The next two cells plot the classification results for left and right hemispheres, respectively:
display(Markdown('### Classification Results - Plots'))
if corr_bools.get('pattern_class'):
    patternStatsLH = patternClassifierLH.getPatternStats()
    classification_plot(patternStatsLH, 'Left')
```
%% Cell type:code id: tags:
``` python
if corr_bools.get('pattern_class'):
    patternStatsRH = patternClassifierRH.getPatternStats()
    classification_plot(patternStatsRH, 'Right')
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Classification Results - Tabulated Statistics'))
if corr_bools.get('pattern_class'):
    t0 = PrettyTable()
    t0.title = "Total Number of Counts after All Corrections"
    t0.field_names = ["Hemisphere", "Singles", "First-Singles", "Clusters"]
    t0.add_row(["LH", patternStatsLH['singles'], patternStatsLH['first singles'], patternStatsLH['clusters']])
    t0.add_row(["RH", patternStatsRH['singles'], patternStatsRH['first singles'], patternStatsRH['clusters']])
    print(t0)

    print("Abbreviations: D (Doubles), T (Triples), Q (Quadruples), L (Left), R (Right), and H (Hemisphere).")
    t1 = PrettyTable()
    t1.field_names = ["Index", "D-LH", "D-RH", "T-LH", "T-RH", "Q-LH", "Q-RH"]
    for idx in range(4):
        t1.add_row([idx,
                    patternStatsLH['doubles'][idx], patternStatsRH['doubles'][idx],
                    patternStatsLH['triples'][idx], patternStatsRH['triples'][idx],
                    patternStatsLH['quads'][idx], patternStatsRH['quads'][idx]])
    print(t1)
```
%% Cell type:code id: tags:
``` python
if corr_bools.get('pattern_class'):
    doublesLH = sum(patternStatsLH['doubles'])
    triplesLH = sum(patternStatsLH['triples'])
    quadsLH = sum(patternStatsLH['quads'])
    allsinglesLH = patternStatsLH['singles'] + patternStatsLH['first singles']
    eventsLH = allsinglesLH + doublesLH + triplesLH + quadsLH

    doublesRH = sum(patternStatsRH['doubles'])
    triplesRH = sum(patternStatsRH['triples'])
    quadsRH = sum(patternStatsRH['quads'])
    allsinglesRH = patternStatsRH['singles'] + patternStatsRH['first singles']
    eventsRH = allsinglesRH + doublesRH + triplesRH + quadsRH

    if eventsLH > 0.:
        reloccurLH = np.array([allsinglesLH/eventsLH, doublesLH/eventsLH, triplesLH/eventsLH, quadsLH/eventsLH])
    else:
        reloccurLH = np.array([0]*4)

    if eventsRH > 0.:
        reloccurRH = np.array([allsinglesRH/eventsRH, doublesRH/eventsRH, triplesRH/eventsRH, quadsRH/eventsRH])
    else:
        reloccurRH = np.array([0]*4)
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Classification Results - Pie Charts'))
if corr_bools.get('pattern_class'):
    fig = plt.figure(figsize=(12, 7))
    ax = fig.add_subplot(1, 2, 1)
    labels = ['Singles', 'Doubles', 'Triples', 'Quads']
    pie = ax.pie(reloccurLH, labels=labels, autopct='%1.1f%%', shadow=True)
    ax.set_title("Pattern Occurrence in LH")
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    a = ax.axis('equal')

    ax = fig.add_subplot(1, 2, 2)
    pie = ax.pie(reloccurRH, labels=labels, autopct='%1.1f%%', shadow=True)
    ax.set_title("Pattern Occurrence in RH")
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    a = ax.axis('equal')
```
%% Cell type:markdown id: tags:
### Mean Images of the First Sequence ###
%% Cell type:code id: tags:
``` python
fig = xana.heatmapPlot(uncor_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,
                       x_range=(0, pixels_y), y_range=(0, pixels_x),
                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                       title='Uncorrected Image Averaged over the First Sequence')

fig = xana.heatmapPlot(offset_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,
                       x_range=(0, pixels_y), y_range=(0, pixels_x),
                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                       title='Offset Corrected Image Averaged over the First Sequence')

if corr_bools.get('relgain'):
    fig = xana.heatmapPlot(gain_mean_im, x_label='Columns', y_label='Rows',
                           lut_label='Gain Corrected Signal (ADU)', aspect=1,
                           x_range=(0, pixels_y), y_range=(0, pixels_x),
                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                           title='Mean Gain Corrected Image of the First Sequence')

if corr_bools.get('pattern_class') and corr_bools.get('common_mode'):
    fig = xana.heatmapPlot(cm_mean_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,
                           x_range=(0, pixels_y), y_range=(0, pixels_x),
                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                           title='Common Mode Corrected Image Averaged over the First Sequence')

    fig = xana.heatmapPlot(mean_im_cc, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,
                           x_range=(0, pixels_y), y_range=(0, pixels_x),
                           title='Image after All Corrections Averaged over the First Sequence')
```
%% Cell type:markdown id: tags:
### Images of the First Frame of the First Sequence ###
%% Cell type:code id: tags:
``` python
fig = xana.heatmapPlot(uncor_single_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,
                       x_range=(0, pixels_y), y_range=(0, pixels_x),
                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                       title='Uncorrected Image (First Frame of First Sequence)')

fig = xana.heatmapPlot(offset_single_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,
                       x_range=(0, pixels_y), y_range=(0, pixels_x),
                       panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                       title='Offset Corrected Image (First Frame of First Sequence)')

if corr_bools.get('pattern_class'):
    if corr_bools.get('common_mode'):
        fig = xana.heatmapPlot(cm_single_im, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)',
                               aspect=1, x_range=(0, pixels_y), y_range=(0, pixels_x),
                               panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                               title='Common Mode Corrected Image (First Frame of First Sequence)')

    fig = xana.heatmapPlot(single_im_cc, x_label='Columns', y_label='Rows', lut_label='Signal (ADU)', aspect=1,
                           x_range=(0, pixels_y), y_range=(0, pixels_x),
                           panel_x_label='Row Stat (ADU)', panel_y_label='Column Stat (ADU)',
                           title='Image after All Corrections (First Frame of First Sequence)')
```
%% Cell type:code id: tags:
``` python
# Resetting the histogram calculators:
histCalRaw.reset()
histCalOffsetCor.reset()
histCalPcorr.reset()
histCalPcorrS.reset()
if corr_bools.get('common_mode'):
    histCalCommonModeCor.reset()
if corr_bools.get('relgain'):
    histCalGainCor.reset()
```
%% Cell type:markdown id: tags:
Next, the corrected event patterns are read from the `patterns` dataset created previously and are separated into four categories (singles, doubles, triples and quadruples) using the pattern indices. This is done only for one sequence, selected by the `seq_num` variable, as an example.
Note that the number of bins and the bin range for the following histograms may differ from those presented above (depending on gain) to make the counts more noticeable and the peaks more defined.
If you are interested in plotting the events from all sequences or the spectra of half of the sensor, execute the spectra_pnCCD_NBC.ipynb notebook.
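To make the selection logic concrete, the next cell masks out every pixel whose pattern index does not match the category being collected; the NaN-ed pixels then drop out of the histograms. A toy version on a hypothetical 1D array:
%% Cell type:code id: tags:
``` python
# Toy example: keep only pixels classified as singles (pattern index 100).
toy_data = np.array([120., 300., 80., 55.], dtype=np.float32)
toy_patterns = np.array([100, 201, 100, 0])  # 0 = no hit, 201 = one double orientation
toy_singles = toy_data.copy()
toy_singles[toy_patterns != 100] = np.nan
print(toy_singles)  # [120.  nan  80.  nan]
```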
%% Cell type:code id: tags:
``` python
if corr_bools.get('pattern_class'):
    singles = []
    doubles = []
    triples = []
    quads = []

    with h5py.File("{}/CORR-R{:04d}-PNCCD01-S{:05d}.h5".format(out_folder, run, sequences[seq_num]), 'r',
                   driver='core') as infile:
        data = infile[h5path + "/pixels_classified"][()].astype(np.float32)  # classifications
        patterns = infile[h5path + "/patterns"][()].astype(np.float32)  # event patterns

    # The event pattern indices are as follows: 100 (singles), 101 (first singles), 200 - 203 (doubles),
    # 300 - 303 (triples), and 400 - 403 (quadruples). Note that for the last three types of patterns,
    # there are left, right, up, and down indices.

    # Separating the events:
    # Singles and First Singles:
    for s in range(100, 102):
        single = copy.copy(data[...])
        single[patterns != s] = np.nan
        singles.append(single)

    for d in range(200, 204):
        double = copy.copy(data[...])
        double[patterns != d] = np.nan
        doubles.append(double)

    for t in range(300, 304):
        triple = copy.copy(data[...])
        triple[patterns != t] = np.nan
        triples.append(triple)

    for q in range(400, 404):
        quad = copy.copy(data[...])
        quad[patterns != q] = np.nan
        quads.append(quad)
```
%% Cell type:code id: tags:
``` python
if corr_bools.get('pattern_class'):
    hA = 0
    h = 0
    for single in singles:
        hs, e = np.histogram(single.flatten(), bins=event_bins, range=b_range)  # h: histogram counts, e: bin edges
        h += hs
        hA += hs  # hA: counts all events (see below)

    # The bin-edges array has one extra element, so we plot against e[:-1] (all edges but the last one)
    # to match the size of the h-array.
    # TODO: adapt the title depending on corrections
    display(Markdown('### Histograms of Offset, Common Mode and Gain Corrected Events for One Sequence Only'))
    fig = plt.figure(figsize=(10, 7))
    ax = fig.add_subplot(111)
    ax.step(e[:-1], h, color='blue', label='Events Involving Single Pixels Only')
    ax.semilogy()  # y-axis is log, x-axis is linear
    ax.set_xlabel("Energy (ADU) [{} bins per {} ADU.]".format(event_bins, b_range[1] - b_range[0]))
    ax.set_ylabel("Gain Corrected Events (counts) for One Sequence")
    ax.set_xlim(x_range)

    h = 0
    for double in doubles:
        hd, e = np.histogram(double.flatten(), bins=event_bins, range=b_range)
        h += hd
        hA += hd
    ax.step(e[:-1], h, color='red', label='Events Splitting on Double Pixels')

    h = 0
    for triple in triples:
        ht, e = np.histogram(triple.flatten(), bins=event_bins, range=b_range)
        h += ht
        hA += ht
    ax.step(e[:-1], h, color='green', label='Events Splitting on Triple Pixels')

    h = 0
    for quad in quads:
        hq, e = np.histogram(quad.flatten(), bins=event_bins, range=b_range)
        h += hq
        hA += hq
    ax.step(e[:-1], h, color='purple', label='Events Splitting on Quadruple Pixels')

    ax.step(e[:-1], hA, color='grey', label='All Valid Events')
    l = ax.legend()
```
%% Cell type:code id: tags:
``` python
```