Commit fcf8a69a authored by Cyril Danilevski

[ePix100][CORRECT] Cleanup and refactor ePix100 correction notebook

Main refactors are:
1- f-strings.  
2- Removing unused variables and imports.  
3- Removing code duplication when retrieving constants.  
4- Using pathlib.  
No changes have been made to the notebook functionality; a brief sketch of patterns 1 and 4 is shown below.  

See merge request detectors/pycalibration!477
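For context, a minimal before/after sketch of refactor patterns 1 and 4 (f-strings and pathlib), using names from the notebook; illustrative only:

```python
# Before: str.format() and string-based path handling.
ped_dir = "{}/r{:04d}".format(in_folder, run)
print("Reading from: {}".format(ped_dir))

# After: pathlib and f-strings, as applied throughout the notebook.
from pathlib import Path

in_folder = Path(in_folder)
ped_dir = in_folder / f"r{run:04d}"
print(f"Reading from: {ped_dir}")
```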
%% Cell type:markdown id: tags:
# ePIX Data Correction
Authors: Q. Tian, S. Hauf, Version 1.0
The following notebook provides Offset correction of images acquired with the ePix100 detector.
%% Cell type:code id: tags:
``` python
cluster_profile = "noDB" # ipcluster profile to use
in_folder = "/gpfs/exfel/exp/CALLAB/202031/p900113/raw" # input folder, required
out_folder = "" # output folder, required
sequences = [-1] # sequences to correct, set to -1 for all, range allowed
run = 9988 # which run to read data from, required

karabo_id = "MID_EXP_EPIX-1" # karabo karabo_id
karabo_da = "EPIX01" # data aggregators
db_module = "ePix100_M15" # module id in the database
receiver_id = "RECEIVER" # inset for receiver devices
path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data
h5path = '/INSTRUMENT/{}/DET/{}:daqOutput/data/image' # path in the HDF5 file to images
h5path_t = '/INSTRUMENT/{}/DET/{}:daqOutput/data/backTemp' # path to find temperature at
h5path_cntrl = '/CONTROL/{}/DET' # path to control data

use_dir_creation_date = True # date constants injected before directory creation time
cal_db_interface = "tcp://max-exfl016:8015#8025" # calibration DB interface to use
cal_db_timeout = 300000 # timeout on caldb requests
cpuCores = 4 # Specifies the number of running cpu cores
chunk_size_idim = 1 # H5 chunking size of output data
overwrite = True # overwrite output folder
limit_images = 0 # process only first N images, 0 - process all
sequences_per_node = 1 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel

bias_voltage = 200 # bias voltage
in_vacuum = False # detector operated in vacuum
fix_temperature = 290. # fix temperature to this value
gain_photon_energy = 9.0 # Photon energy used for gain calibration
photon_energy = 8.0 # Photon energy to calibrate in number of photons, 0 for calibration in keV
relative_gain = False # Apply relative gain correction.

split_evt_primary_threshold = 7. # primary threshold for split event correction
split_evt_secondary_threshold = 5. # secondary threshold for split event correction
split_evt_mip_threshold = 1000. # minimum ionizing particle threshold


def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):
    from xfel_calibrate.calibrate import balance_sequences as bs
    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)
```
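%% Cell type:markdown id: tags:
Note that the doubled braces in `path_template` survive the first `.format()` call, leaving a placeholder for the sequence number. A quick illustration (not part of the correction itself):
%% Cell type:code id: tags:
``` python
# Stage one fills in the run number and data aggregator;
# stage two fills in the sequence number.
fp_name = path_template.format(run, karabo_da)
print(fp_name)            # RAW-R9988-EPIX01-S{:05d}.h5
print(fp_name.format(0))  # RAW-R9988-EPIX01-S00000.h5
```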
%% Cell type:code id: tags:
``` python
import tabulate
import warnings

import h5py
import numpy as np
from IPython.display import Latex, display
from pathlib import Path

import XFELDetAna.xfelprofiler as xprof
from XFELDetAna import xfelpyanatools as xana
from XFELDetAna import xfelpycaltools as xcal
from XFELDetAna.plotting.util import prettyPlotting
from XFELDetAna.util import env
from cal_tools.tools import (
    get_constant_from_db,
    get_dir_creation_date,
)
from iCalibrationDB import (
    Conditions,
    Constants,
)

warnings.filterwarnings('ignore')

prettyPlotting = True

profiler = xprof.Profiler()
profiler.disable()
env.iprofile = cluster_profile
%matplotlib inline
```
%% Cell type:code id: tags:
``` python
# TODO: expose to first cell after fixing common mode correction.
common_mode = False # Apply common mode correction.
h5path = h5path.format(karabo_id, receiver_id)
h5path_t = h5path_t.format(karabo_id, receiver_id)
h5path_cntrl = h5path_cntrl.format(karabo_id)
plot_unit = 'ADU'
if relative_gain:
    plot_unit = 'keV'
    if photon_energy > 0:
        plot_unit = '$\gamma$'
```
%% Cell type:code id: tags:
``` python
x = 708 # rows of the ePix100
y = 768 # columns of the ePix100
in_folder = Path(in_folder)
ped_dir = in_folder / f"r{run:04d}"
fp_name = path_template.format(run, karabo_da)

print(f"Reading from: {ped_dir / fp_name}")
print(f"Run is: {run}")
print(f"HDF5 path: {h5path}")
print(f"Data is output to: {out_folder}")

creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run)
if creation_time:
    print(f"Using {creation_time.isoformat()} as creation time")
```
%% Cell type:code id: tags:
``` python
sensorSize = [x, y]
chunkSize = 100  # Number of images to read per chunk
# Sensor area will be analysed according to blocksize
blockSize = [sensorSize[0]//2, sensorSize[1]//2]
xcal.defaultBlockSize = blockSize
memoryCells = 1  # ePIX has no memory cells
run_parallel = True

# Read slow data from the first available sequence file.
filename = ped_dir / fp_name.format(sequences[0] if sequences[0] != -1 else 0)
with h5py.File(filename, 'r') as f:
    integration_time = int(f[f"{h5path_cntrl}/CONTROL/expTime/value"][0])
    temperature = np.mean(f[h5path_t]) / 100.
    temperature_k = temperature + 273.15
    if fix_temperature != 0:
        temperature_k = fix_temperature
        print("Temperature is fixed!")

print(f"Bias voltage is {bias_voltage} V")
print(f"Detector integration time is set to {integration_time}")
print(
    f"Mean temperature was {temperature:0.2f} °C "
    f"/ {temperature_k:0.2f} K at beginning of run"
)
print(f"Operated in vacuum: {in_vacuum}")

out_folder = Path(out_folder)
if out_folder.is_dir() and not overwrite:
    raise AttributeError("Output path exists! Exiting")
out_folder.mkdir(parents=True, exist_ok=True)
```
%% Cell type:code id: tags:
``` python
# Glob the right *.h5 fast data files.
seq_files = sorted(ped_dir.glob(f"*{karabo_da}*.h5"))

# If a set of sequences is requested, adapt the seq_files list.
if sequences != [-1]:
    seq_files = [f for f in seq_files
                 if any(f.match(f"*-S{s:05d}.h5") for s in sequences)]

print(f"Processing a total of {len(seq_files)} sequence files")
```
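%% Cell type:markdown id: tags:
`Path.match` compares a glob pattern against the right end of the path, which is why the suffix pattern above selects sequence files regardless of their parent folders. A small illustration with a hypothetical file name:
%% Cell type:code id: tags:
``` python
# Hypothetical sequence file; only the "-S00000.h5" suffix matters here.
f = Path("r9988/RAW-R9988-EPIX01-S00000.h5")
print(f.match("*-S00000.h5"))  # True
print(f.match("*-S00001.h5"))  # False
```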
%% Cell type:code id: tags:
``` python
# Table of sequence files to process
table = [(k, f) for k, f in enumerate(seq_files)]

if len(table):
    md = display(Latex(tabulate.tabulate(
        table,
        tablefmt='latex',
        headers=["#", "file"]
    )))
```
%% Cell type:markdown id: tags:
As a first step, the dark constants (offset and noise maps) and, if requested, the relative gain map are retrieved from the calibration database.
%% Cell type:code id: tags:
``` python
temp_limits = 5.

cond_dict = {
    "bias_voltage": bias_voltage,
    "integration_time": integration_time,
    "temperature": temperature_k,
    "in_vacuum": in_vacuum,
}

dark_condition = Conditions.Dark.ePix100(**cond_dict)

# Update conditions with illuminated conditions.
cond_dict.update({
    "photon_energy": gain_photon_energy
})

illum_condition = Conditions.Illuminated.ePix100(**cond_dict)

const_cond = {
    "Offset": dark_condition,
    "Noise": dark_condition,
    "RelativeGain": illum_condition,
}

const_data = dict()

for cname, condition in const_cond.items():
    if cname == "RelativeGain" and not relative_gain:
        continue
    # TODO: Fix this logic.
    for parm in condition.parameters:
        if parm.name == "Sensor Temperature":
            parm.lower_deviation = temp_limits
            parm.upper_deviation = temp_limits

    const_data[cname] = get_constant_from_db(
        karabo_id=karabo_id,
        karabo_da=karabo_da,
        constant=getattr(Constants.ePix100, cname)(),
        condition=condition,
        empty_constant=None,
        cal_db_interface=cal_db_interface,
        creation_time=creation_time,
        print_once=2,
        timeout=cal_db_timeout
    )

if relative_gain and const_data["RelativeGain"] is None:
    print(
        "WARNING: RelativeGain map is requested, but not found.\n"
        "No gain correction will be applied"
    )
    relative_gain = False
    plot_unit = 'ADU'
```
%% Cell type:code id: tags:
``` python
# ************************Calculators************************ #
offsetCorrection = xcal.OffsetCorrection(
    sensorSize,
    const_data["Offset"],
    nCells=memoryCells,
    cores=cpuCores,
    gains=None,
    blockSize=blockSize,
    parallel=run_parallel
)

if relative_gain:
    gainCorrection = xcal.RelativeGainCorrection(
        sensorSize,
        1./const_data["RelativeGain"][..., None],
        nCells=memoryCells,
        parallel=run_parallel,
        cores=cpuCores,
        blockSize=blockSize,
        gains=None,
    )
```
%% Cell type:code id: tags:
``` python
# *****************Histogram Calculators****************** #
histCalOffsetCor = xcal.HistogramCalculator(
    sensorSize,
    bins=1050,
    range=[-50, 1000],
    parallel=run_parallel,
    nCells=memoryCells,
    cores=cpuCores,
    blockSize=blockSize
)
```
%% Cell type:markdown id: tags:
Applying corrections
%% Cell type:code id: tags:
``` python
histCalOffsetCor.debug()
offsetCorrection.debug()
if relative_gain:
    gainCorrection.debug()
```
%% Cell type:code id: tags:
``` python
# ************************Calculators************************ #
if common_mode:
    commonModeBlockSize = [x//2, y//2]
    commonModeAxisR = 'row'
    cmCorrection = xcal.CommonModeCorrection(
        [x, y],
        commonModeBlockSize,
        commonModeAxisR,
        nCells=memoryCells,
        noiseMap=const_data["Noise"],
        runParallel=run_parallel,
        stats=True,
    )
    histCalCMCor = xcal.HistogramCalculator(
        sensorSize,
        bins=1050,
        range=[-50, 1000],
        parallel=run_parallel,
        nCells=memoryCells,
        cores=cpuCores,
        blockSize=blockSize,
    )

patternClassifier = xcal.PatternClassifier(
    [x, y],
    const_data["Noise"],
    split_evt_primary_threshold,
    split_evt_secondary_threshold,
    split_evt_mip_threshold,
    tagFirstSingles=0,
    nCells=memoryCells,
    cores=cpuCores,
    allowElongated=False,
    blockSize=[x, y],
    runParallel=run_parallel,
)

histCalSECor = xcal.HistogramCalculator(
    sensorSize,
    bins=1050,
    range=[-50, 1000],
    parallel=run_parallel,
    nCells=memoryCells,
    cores=cpuCores,
    blockSize=blockSize,
)
```
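%% Cell type:markdown id: tags:
For reference, the correction loop below keeps only single events when filling the single split event histogram; in the pattern map returned by `patternClassifier.classify`, code 100 marks singles. A minimal sketch of that selection, with illustrative names:
%% Cell type:code id: tags:
``` python
def select_singles(data, patterns):
    """Return a copy of `data` with all but single events (code 100) masked."""
    singles = data.astype(np.float32)  # astype returns a copy
    singles[patterns != 100] = np.nan
    return singles
```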
%% Cell type:code id: tags:
``` python
if common_mode:
    cmCorrection.debug()
    histCalCMCor.debug()
patternClassifier.debug()
histCalSECor.debug()
```
%% Cell type:code id: tags:
``` python
def copy_and_sanitize_non_cal_data(
    infile: h5py.File,
    outfile: h5py.File,
    h5base: str
):
    """ Copy and sanitize data in `infile`
    that is not touched by `correctEPIX`. """

    if h5base.startswith("/"):
        h5base = h5base[1:]
    dont_copy = [h5base+"/pixels"]

    def visitor(k, item):
        if k not in dont_copy:
            if isinstance(item, h5py.Group):
                outfile.create_group(k)
            elif isinstance(item, h5py.Dataset):
                group = str(k).split("/")
                group = "/".join(group[:-1])
                infile.copy(k, outfile[group])

    infile.visititems(visitor)
```
%% Cell type:code id: tags:
``` python
for f in seq_files:
    data = None
    out_file = out_folder / f.name.replace("RAW", "CORR")
    with h5py.File(f, "r") as infile, h5py.File(out_file, "w") as ofile:
        try:
            copy_and_sanitize_non_cal_data(infile, ofile, h5path)
            data = infile[h5path+"/pixels"][()]
            data = np.compress(np.any(data > 0, axis=(1, 2)), data, axis=0)
            if limit_images > 0:
                data = data[:limit_images, ...]
            oshape = data.shape
            data = np.moveaxis(data, 0, 2)
            ddset = ofile.create_dataset(
                h5path+"/pixels",
                oshape,
                chunks=(chunk_size_idim, oshape[1], oshape[2]),
                dtype=np.float32)

            # Offset correction.
            data = offsetCorrection.correct(data.astype(np.float32))

            # Relative gain correction.
            if relative_gain:
                data = gainCorrection.correct(data.astype(np.float32))
                if photon_energy > 0:
                    data /= photon_energy

            histCalOffsetCor.fill(data)
            ddset[...] = np.moveaxis(data, 2, 0)

            # Common mode correction.
            # TODO: Fix conflict between common mode and relative gain.
            """The gain correction is currently applying an absolute correction
            (not a relative correction, as implied by the name);
            it changes the scale (the unit of measurement) of the data from ADU
            to either keV or n_of_photons. But the common mode correction
            relies on comparing data with the noise map, which is still in ADU.
            The best solution is probably to do a relative gain correction first
            (correct) and apply the global absolute gain to the data at the end,
            after common mode and clustering.
            """
            if common_mode:
                ddsetcm = ofile.create_dataset(
                    h5path+"/pixels_cm",
                    oshape,
                    chunks=(chunk_size_idim, oshape[1], oshape[2]),
                    dtype=np.float32)

                ddsetc = ofile.create_dataset(
                    h5path+"/pixels_classified",
                    oshape,
                    chunks=(chunk_size_idim, oshape[1], oshape[2]),
                    dtype=np.float32, compression="gzip")

                ddsetp = ofile.create_dataset(
                    h5path+"/patterns",
                    oshape,
                    chunks=(chunk_size_idim, oshape[1], oshape[2]),
                    dtype=np.int32, compression="gzip")

                # Row common mode correction.
                data = cmCorrection.correct(data)
                histCalCMCor.fill(data)
                ddsetcm[...] = np.moveaxis(data, 2, 0)

                data, patterns = patternClassifier.classify(data)
                data[data < (split_evt_primary_threshold*const_data["Noise"])] = 0  # noqa
                ddsetc[...] = np.moveaxis(data, 2, 0)
                ddsetp[...] = np.moveaxis(patterns, 2, 0)
                data[patterns != 100] = np.nan
                histCalSECor.fill(data)
        except Exception as e:
            print(f"ERROR correcting sequence file {f}: {e}")
```
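%% Cell type:markdown id: tags:
The TODO in the cell above notes that the gain correction changes the data scale away from ADU before the common mode step, which compares against a noise map still in ADU. A hedged sketch of the ordering it proposes, keeping data in ADU until after common mode and clustering; all names and the toy row common mode are illustrative, not the notebook's implementation:
%% Cell type:code id: tags:
``` python
def correct_frame_sketch(raw, offset, rel_gain, abs_gain, noise, photon_energy):
    data = raw.astype(np.float32) - offset          # offset correction, still ADU
    data *= rel_gain                                # unit-free relative gain, still ADU
    data -= np.median(data, axis=0, keepdims=True)  # toy row common mode, ADU vs ADU noise
    data[data < 7. * noise] = 0                     # thresholds stay meaningful in ADU
    data *= abs_gain                                # absolute scale last: ADU -> keV
    if photon_energy > 0:
        data /= photon_energy                       # keV -> number of photons
    return data
```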
%% Cell type:code id: tags:
``` python
ho, eo, co, so = histCalOffsetCor.get()

d = [{
    'x': co,
    'y': ho,
    'y_err': np.sqrt(ho[:]),
    'drawstyle': 'steps-mid',
    'errorstyle': 'bars',
    'errorcoarsing': 2,
    'label': 'Offset corr.'
}]

if common_mode:
    ho, eo, co, so = histCalCMCor.get()
    d.append({
        'x': co,
        'y': ho,
        'y_err': np.sqrt(ho[:]),
        'drawstyle': 'steps-mid',
        'errorstyle': 'bars',
        'errorcoarsing': 2,
        'label': 'CM corr.'
    })

    ho, eo, co, so = histCalSECor.get()
    d.append({
        'x': co,
        'y': ho,
        'y_err': np.sqrt(ho[:]),
        'drawstyle': 'steps-mid',
        'errorstyle': 'bars',
        'errorcoarsing': 2,
        'label': 'Single split events'
    })

fig = xana.simplePlot(
    d, aspect=1, x_label=f'Energy({plot_unit})',
    y_label='Number of occurrences', figsize='2col',
    y_log=True, x_range=(-50, 500),
    legend='top-center-frame-2col'
)
```
%% Cell type:markdown id: tags:
## Mean Image of last Sequence ##
%% Cell type:code id: tags:
``` python
fig = xana.heatmapPlot(
    np.nanmedian(data, axis=2),
    x_label='Columns', y_label='Rows',
    lut_label=f'Signal ({plot_unit})',
    x_range=(0, y),
    y_range=(0, x),
    vmin=-50, vmax=50)
```
%% Cell type:markdown id: tags:
## Single Shot of last Sequence ##
%% Cell type:code id: tags:
``` python
fig = xana.heatmapPlot(
data[..., 0],
x_label='Columns', y_label='Rows',
lut_label=f'Signal ({plot_unit})',
x_range=(0, y),
y_range=(0, x),
vmin=-50, vmax=50)
```