Commit 874646bd authored by Steffen Hauf

Allow to query versions, fixes to AGIPD

parent 07d41311
1 merge request: !114 "Feat/updates agipd query versions"
@@ -76,7 +76,8 @@ class AgipdCorrections:
                  cal_det_instance="AGIPD1M1", karabo_data_mode=False,
                  force_hg_if_below=None, force_mg_if_below=None,
                  mask_noisy_adc=False, adjust_mg_baseline=False,
-                 acquisition_rate=None):
+                 acquisition_rate=None, dont_zero_nans=False,
+                 dont_zero_orange=False):
         """
         Initialize an AgipdCorrections Class
@@ -148,6 +149,9 @@ class AgipdCorrections:
         self.adjust_mg_baseline = adjust_mg_baseline
         self.mg_bl_adjust = 0
         self.acquisition_rate = acquisition_rate
+        self.dont_zero_nans = dont_zero_nans
+        self.dont_zero_orange = dont_zero_orange
+        self.valid_indices = None

     def get_iteration_range(self):
         """Returns a range expression over which to iterate in chunks
@@ -999,14 +1003,16 @@ class AgipdCorrections:
         # create a bad pixel mask for the data
         # we add any non-finite values to the mask
-        bidx = ~np.isfinite(im)
-        im[bidx] = 0
-        msk[bidx] |= BadPixels.VALUE_IS_NAN.value
+        if not self.dont_zero_nans:
+            bidx = ~np.isfinite(im)
+            im[bidx] = 0
+            msk[bidx] |= BadPixels.VALUE_IS_NAN.value

         # and such which have unrealistically high and low pixel values
-        bidx = (im < -1e7) | (im > 1e7)
-        im[bidx] = 0
-        msk[bidx] |= BadPixels.VALUE_OUT_OF_RANGE.value
+        if not self.dont_zero_orange:
+            bidx = (im < -1e7) | (im > 1e7)
+            im[bidx] = 0
+            msk[bidx] |= BadPixels.VALUE_OUT_OF_RANGE.value

         # include pixels with zero standard deviation in the dataset into
         # the mask
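For context, the two new flags simply make the existing zeroing steps optional; a minimal standalone sketch of the guarded behaviour on hypothetical `im`/`msk` arrays (the `BadPixels` stand-in below uses illustrative bit values, not those of `cal_tools.enums`):

``` python
import numpy as np
from enum import IntEnum


class BadPixels(IntEnum):  # reduced stand-in; real bit values live in cal_tools.enums
    VALUE_IS_NAN = 1 << 0
    VALUE_OUT_OF_RANGE = 1 << 1


def zero_bad_values(im, msk, dont_zero_nans=False, dont_zero_orange=False):
    if not dont_zero_nans:  # default: zero non-finite pixels and flag them
        bidx = ~np.isfinite(im)
        im[bidx] = 0
        msk[bidx] |= BadPixels.VALUE_IS_NAN.value
    if not dont_zero_orange:  # default: zero unrealistically large/small ("orange") pixels
        bidx = (im < -1e7) | (im > 1e7)
        im[bidx] = 0
        msk[bidx] |= BadPixels.VALUE_OUT_OF_RANGE.value
    return im, msk


im = np.array([1.0, np.nan, 5e8], dtype=np.float32)
msk = np.zeros(3, dtype=np.uint32)
im, msk = zero_bad_values(im, msk)
print(im, msk)  # [1. 0. 0.] [0 1 2]
```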
@@ -1070,7 +1076,7 @@ class AgipdCorrections:
         """ Return the indices of valid data
         """
         agipd_base = self.idx_base
+        print(agipd_base)
         if self.index_v == 2:
             count = np.squeeze(self.infile[agipd_base + "image/count"])
             first = np.squeeze(self.infile[agipd_base + "image/first"])
@@ -1082,9 +1088,21 @@ class AgipdCorrections:
             lowok = (idxtrains > medianTrain - 1e4)
             highok = (idxtrains < medianTrain + 1e4)
             valid &= lowok & highok
+            uq, fidxv, cntsv = np.unique(idxtrains,
+                                         return_index=True,
+                                         return_counts=True)
+            valid &= cntsv == 1  # filter out double trains
             self.valid = valid
             last_index = int(first[valid][-1] + count[valid][-1])
             first_index = int(first[valid][0])
+            # do actual validity filtering:
+            validc, validf = count[valid], first[valid]
+            valid_indices = np.concatenate([np.arange(validf[i],
+                                                      validf[i]+validc[i])
+                                            for i in range(validf.size)], axis=0)
+            self.valid_indices = np.squeeze(valid_indices).astype(np.int32)
         elif self.index_v == 1:
             status = np.squeeze(self.infile[agipd_base + "image/status"])
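As a side note, the duplicate-train filter and index concatenation added in this hunk are plain NumPy; a standalone sketch of the intent with hypothetical index data (it maps the per-unique-ID counts back onto the train axis via `return_inverse`, since `cntsv` as committed has one entry per unique train ID):

``` python
import numpy as np

# hypothetical index data: train 101 was recorded twice and should be dropped
idxtrains = np.array([100, 101, 101, 102])
first = np.array([0, 10, 20, 30])  # first image index per train entry
count = np.array([10, 10, 10, 5])  # number of images per train entry

valid = np.ones(idxtrains.size, dtype=bool)
uq, inv, cntsv = np.unique(idxtrains, return_inverse=True, return_counts=True)
valid &= (cntsv == 1)[inv]  # filter out double trains

# concatenate the per-train index ranges into one flat list of image indices
validc, validf = count[valid], first[valid]
valid_indices = np.concatenate([np.arange(validf[i], validf[i] + validc[i])
                                for i in range(validf.size)], axis=0)
print(valid_indices.astype(np.int32))  # [0 1 ... 9 30 31 32 33 34]
```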
@@ -1119,8 +1137,11 @@ class AgipdCorrections:
         last_index = self.last_index
         max_cells = self.max_cells
         agipd_base = self.agipd_base
-        allcells = self.infile[agipd_base + "image/cellId"]
-        allcells = np.squeeze(allcells[first_index:last_index, ...])
+        allcells = np.squeeze(self.infile[agipd_base + "image/cellId"])
+        if self.valid_indices is not None:
+            allcells = allcells[self.valid_indices]
+        else:
+            allcells = allcells[first_index:last_index]
         single_image = self.infile[agipd_base + "image/data"][first_index, ...]
         single_image = np.array(single_image)
@@ -1129,7 +1150,10 @@ class AgipdCorrections:
         if np.count_nonzero(can_calibrate) == 0:
             return
         allcells = allcells[can_calibrate]
-        firange = np.arange(first_index, last_index)
+        if self.valid_indices is None:
+            firange = np.arange(first_index, last_index)
+        else:
+            firange = self.valid_indices
         firange = firange[can_calibrate]
         self.oshape = (firange.size if not self.il_mode else firange.size // 2,
@@ -1152,6 +1176,7 @@ class AgipdCorrections:
         if self.il_mode:
             firange = firange[0::2]
         alltrains = self.infile[agipd_base + "image/trainId"]
         alltrains = np.squeeze(
             alltrains[first_index:last_index, ...])
...
@@ -297,9 +297,9 @@ def make_titlepage(sphinx_path, project, data_path, version):
         title_tmp = Template(file_.read())
     with open("{}/titlepage.tex.txt".format(sphinx_path), "w+") as mf:
-        mf.write(dedent(title_tmp.render(project=project,
-                                         data_path=data_path,
-                                         version=version)))
+        mf.write(dedent(title_tmp.render(project=tex_escape(project),
+                                         data_path=tex_escape(data_path),
+                                         version=tex_escape(version))))


 def finalize(joblist, finaljob, run_path, out_path, project, calibration,
@@ -454,7 +454,8 @@ already_printed = {}
 def get_from_db(device, constant, condition, empty_constant,
                 cal_db_interface, creation_time=None,
-                verbosity=1, timeout=30000, ntries=120, meta_only=True):
+                verbosity=1, timeout=30000, ntries=120, meta_only=True,
+                version_info=False):
     """
     Return calibration constants and metadata requested from CalDB
@@ -474,6 +475,12 @@ def get_from_db(device, constant, condition, empty_constant,
     from iCalibrationDB import ConstantMetaData, Versions
     import zmq

+    meta_only = True
+    timeout = 1200000
+    if version_info:
+        meta_only = False
+
     if device:
         metadata = ConstantMetaData()
         metadata.calibration_constant = constant
@@ -486,17 +493,25 @@ def get_from_db(device, constant, condition, empty_constant,
                                                       start=creation_time)

         while ntries > 0:
             this_interface = get_random_db_interface(cal_db_interface)
             try:
-                metadata.retrieve(this_interface, timeout=timeout,
-                                  meta_only=meta_only)
+                print("Retrieving from {} for {}".format(this_interface, device))
+                r = metadata.retrieve(this_interface, timeout=timeout,
+                                      meta_only=meta_only, version_info=version_info)
+                if version_info:
+                    return r
                 break
             except zmq.error.Again:
                 ntries -= 1
+                sleep(np.random.randint(30))
             except Exception as e:
                 if verbosity > 0:
                     print(e)
                 ntries = 0
+                break

         if ntries > 0:
             if verbosity > 0:
...
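With the new flag, callers can ask the database which constant versions exist instead of retrieving a constant; a hypothetical call, assuming `device`, `constant` and `condition` objects set up as elsewhere in this code base (when `version_info=True`, the function returns the raw `metadata.retrieve(...)` result directly, before the usual metadata handling):

``` python
versions = get_from_db(device, constant, condition,
                       empty_constant=None,
                       cal_db_interface="tcp://max-exfl016:8016",
                       creation_time=None,
                       version_info=True)
```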
%% Cell type:markdown id: tags:

# Jungfrau Offline Correction #

Author: European XFEL Detector Group, Version: 0.1

Offline Calibration for the Jungfrau Detector
%% Cell type:code id: tags:

``` python
in_folder = "/gpfs/exfel/exp/FXE/201802/p002271/ra" # the folder to read data from, required
run = 132 # run to process, required
out_folder = "/gpfs/exfel/data/scratch/haufs/test/" # the folder to output to, required
calfile = "" # path to calibration file. Leave empty if all data should come from DB, not actually used, makes automode happy
sequences = [-1] # sequences to correct, set to -1 for all, range allowed
mem_cells = 1 # memory cells in data, not actually used, makes automode happy
overwrite = True # set to True if existing data should be overwritten
no_relative_gain = False # do not do relative gain correction
cluster_profile = "noDB" # cluster profile to use
bias_voltage = 90 # will be overwritten by value in file
cal_db_interface = "tcp://max-exfl016:8016" #"tcp://max-exfl016:8015#8025" # the database interface to use
use_dir_creation_date = True # use the creation date of the input dir for database queries
sequences_per_node = 5 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel
photon_energy = 9.2 # photon energy in keV
nodb = False # if set only file-based constants will be used
chunk_size_idim = 1 # chunking size of imaging dimension, adjust if user software is sensitive to this
path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # template to use for file name, double escape sequence number
path_template_control = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # template to use for file name, double escape sequence number
integration_time = 1000 # integration time in us, will be overwritten by value in file
h5path = '/INSTRUMENT/{}/DET/{}:daqOutput/data' # path in H5 file under which images are located
gmapfile = "/gpfs/exfel/data/scratch/xcal/jfgain/gainMaps_M233.h5" # temporary gain calibration file, not in the DB; leave empty if using DB
memcells = 1 # number of memory cells
karabo_id = "FXE_XAD_JF1M" # karabo prefix of Jungfrau devices
karabo_id_control = "" # set if control devices are on a different karabo ID; leave empty to use the same ID as for data
receiver_id = "RECEIVER" # inset for receiver devices
control_id = "CONTROL" # inset for control devices
db_module = "Jungfrau_M233" # ID of module in calibration database
path_inset = "JNGFR02" # file inset for image data
path_inset_control = "JNGFR01" # file inset for control data
manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values


def balance_sequences(in_folder, run, sequences, sequences_per_node):
    import glob
    import re
    import numpy as np
    if sequences_per_node != 0:
        sequence_files = glob.glob("{}/r{:04d}/*-S*.h5".format(in_folder, run))
        seq_nums = set()
        for sf in sequence_files:
            seqnum = re.findall(r".*-S([0-9]*).h5", sf)[0]
            seq_nums.add(int(seqnum))
        seq_nums -= set(sequences)
        return [l.tolist() for l in np.array_split(list(seq_nums),
                                                   len(seq_nums)//sequences_per_node+1)]
    else:
        return sequences
```
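The chunking at the end of `balance_sequences` is a plain `np.array_split`; a minimal illustration with hypothetical sequence numbers:

``` python
import numpy as np

seq_nums = list(range(13))  # hypothetical sequence numbers found on disk
sequences_per_node = 5
chunks = [l.tolist() for l in np.array_split(seq_nums,
                                             len(seq_nums)//sequences_per_node + 1)]
print(chunks)  # [[0, 1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
```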
%% Cell type:code id: tags:

``` python

```
%% Cell type:code id: tags:

``` python
import warnings
warnings.filterwarnings('ignore')

h5path = h5path.format(karabo_id, receiver_id)

import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import XFELDetAna.xfelpyanatools as xana
import XFELDetAna.xfelpycaltools as xcal
from cal_tools.enums import BadPixels
from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions
from cal_tools.tools import (get_dir_creation_date)
import os
import h5py
from XFELDetAna.util import env
env.iprofile = cluster_profile
from XFELDetAna.detectors.jungfrau import readerPSI as jfreaderPSI
from XFELDetAna.detectors.jungfrau import reader as jfreader
from XFELDetAna.detectors.jungfrau.jf_chunk_reader import JFChunkReader
from XFELDetAna.detectors.jungfrau.util import non_empty_trains

# some constants relevant for the analysis
cpuCores = 1
is_parallel = False
sensorSize = [1024, 512]
blockSize = [1024, 512]
xRange = [0, 0+sensorSize[0]]
yRange = [0, 0+sensorSize[1]]
gains = [0, 1, 2]

ped_dir = "{}/r{:04d}".format(in_folder, run)
if ped_dir[-1] == "/":
    ped_dir = ped_dir[:-1]
out_folder = "{}/{}".format(out_folder, os.path.split(ped_dir)[-1])
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
elif not overwrite:
    raise AttributeError("Output path exists! Exiting")

fp_name = path_template.format(run, path_inset)
fp_path = '{}/{}'.format(ped_dir, fp_name)
fp_name_contr = path_template_control.format(run, path_inset_control)
fp_path_contr = '{}/{}'.format(ped_dir, fp_name_contr)
filep_size = 500
myRange_P = sequences
path = h5path

if sequences[0] == -1:
    sequences = None

print("Reading data from {}".format(fp_path))
print("Run is: {}".format(run))
print("HDF5 path: {}".format(h5path))

creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run)
    print("Using {} as creation time".format(creation_time))

cal_timeout = 600000 # ms

if karabo_id_control == "":
    karabo_id_control = karabo_id
```
%% Cell type:code id: tags:

``` python
import h5py
if not manual_slow_data:
    with h5py.File(fp_path_contr.format(0), 'r') as f:
        integration_time = int(f['/RUN/{}/DET/{}/exposureTime/value'.format(karabo_id_control, control_id)][()]*1e6)
        bias_voltage = int(np.squeeze(f['/RUN/{}/DET/{}/vHighVoltage/value'.format(karabo_id_control, control_id)])[0])
print("Integration time is {} us".format(integration_time))
print("Bias voltage is {} V".format(bias_voltage))
```
%% Cell type:code id: tags:

``` python
dirlist = sorted(os.listdir(ped_dir))
file_list = []
total_sequences = 0
fsequences = []
for entry in dirlist:
    # only process h5 files
    abs_entry = "{}/{}".format(ped_dir, entry)
    if os.path.isfile(abs_entry) and os.path.splitext(abs_entry)[1] == ".h5":
        if sequences is None:
            for seq in range(len(dirlist)):
                if path_template.format(run, path_inset).format(seq) in abs_entry:
                    file_list.append(abs_entry)
                    total_sequences += 1
                    fsequences.append(seq)
        else:
            for seq in sequences:
                if path_template.format(run, path_inset).format(seq) in abs_entry:
                    file_list.append(os.path.abspath(abs_entry))
                    total_sequences += 1
                    fsequences.append(seq)
sequences = fsequences
```
%% Cell type:code id: tags:

``` python
import copy
from IPython.display import HTML, display, Markdown, Latex
import tabulate

print("Processing a total of {} sequence files".format(total_sequences))
table = []
for k, f in enumerate(file_list):
    table.append((k, f))
md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=["#", "file"])))
```
%% Cell type:code id: tags:

``` python
def get_PSI_gainmaps(fname, dset):
    with h5py.File(fname, 'r') as f_g:
        gain_map = np.array(f_g[dset])
        gain_map = np.transpose(gain_map, (2, 1, 0, 3))
        gain_map = np.expand_dims(gain_map, axis=0)
    return gain_map
```
%% Cell type:code id: tags:

``` python
## offset
metadata = ConstantMetaData()
offset = Constants.jungfrau.Offset()
metadata.calibration_constant = offset

# set the operating condition
condition = Conditions.Dark.jungfrau(memory_cells=1, bias_voltage=bias_voltage,
                                     integration_time=integration_time)
device = getattr(Detectors, db_module)
metadata.detector_condition = condition

# specify a version for this constant
if creation_time is None:
    metadata.calibration_constant_version = Versions.Now(device=device)
    metadata.retrieve(cal_db_interface)
else:
    metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                              start=creation_time)
    metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)

offset_map = metadata.calibration_constant.data

## noise
metadata = ConstantMetaData()
noise = Constants.jungfrau.Noise()
metadata.calibration_constant = noise

# set the operating condition
condition = Conditions.Dark.jungfrau(memory_cells=1, bias_voltage=bias_voltage,
                                     integration_time=integration_time)
metadata.detector_condition = condition

# specify a version for this constant
if creation_time is None:
    metadata.calibration_constant_version = Versions.Now(device=device)
    metadata.retrieve(cal_db_interface)
else:
    metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                              start=creation_time)
    metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)

noise_map = metadata.calibration_constant.data

## bad pixels
metadata = ConstantMetaData()
bpix = Constants.jungfrau.BadPixelsDark()
metadata.calibration_constant = bpix

# set the operating condition
condition = Conditions.Dark.jungfrau(memory_cells=1, bias_voltage=bias_voltage,
                                     integration_time=integration_time)
metadata.detector_condition = condition

# specify a version for this constant
if creation_time is None:
    metadata.calibration_constant_version = Versions.Now(device=device)
    metadata.retrieve(cal_db_interface)
else:
    metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                              start=creation_time)
    metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)

mask = metadata.calibration_constant.data
```
%% Cell type:code id: tags:

``` python
# gain correction
if gmapfile:
    gain_map = get_PSI_gainmaps(gmapfile, 'gain_map_g0')
    print('Not using DB gain maps')
    print('maps from: {}'.format(gmapfile))
```
%% Cell type:code id: tags:

``` python
def copy_and_sanitize_non_cal_data(infile, outfile, h5base):
    """ Copy and sanitize data in `infile` that is not touched by `correctLPD`
    """
    if h5base.startswith("/"):
        h5base = h5base[1:]
    dont_copy = ["adc", ]
    dont_copy = [h5base+"/{}".format(do)
                 for do in dont_copy]

    def visitor(k, item):
        if k not in dont_copy:
            if isinstance(item, h5py.Group):
                outfile.create_group(k)
            elif isinstance(item, h5py.Dataset):
                group = str(k).split("/")
                group = "/".join(group[:-1])
                infile.copy(k, outfile[group])

    infile.visititems(visitor)
```
%% Cell type:code id: tags:

``` python
import copy
offset_map = np.squeeze(offset_map)
mask = np.squeeze(mask)

fim_data = None
gim_data = None
rim_data = None
msk_data = None
for k, f in enumerate(file_list):
    with h5py.File(f, 'r') as infile:
        out_file = "{}/{}".format(out_folder, f.split("/")[-1])
        out_file = out_file.replace("RAW", "CORR")
        with h5py.File(out_file, "w") as ofile:
            copy_and_sanitize_non_cal_data(infile, ofile, h5path)
            d = infile[h5path+"/adc"][()].astype(np.float32)
            if k == 0:
                rim_data = np.squeeze(copy.copy(d))
            g = infile[h5path+"/gain"][()]
            oshape = d.shape
            ddset = ofile.create_dataset(h5path+"/adc",
                                         oshape,
                                         chunks=(chunk_size_idim, 1, oshape[1], oshape[2]),
                                         dtype=np.float32)
            mskset = ofile.create_dataset(h5path+"/mask",
                                          oshape,
                                          chunks=(chunk_size_idim, 1, oshape[1], oshape[2]),
                                          dtype=np.uint32,
                                          compression="gzip", compression_opts=1, shuffle=True)
            g[g==3] = 2
            # offset correction
            offset = np.choose(g, (offset_map[...,0], offset_map[...,1], offset_map[...,2]))
            d -= offset
            # gain correction
            cal = np.choose(g, (gain_map[..., 0], gain_map[..., 1], gain_map[..., 2]))
            d /= cal
            ddset[...] = d
            msk = np.choose(g, (mask[...,0], mask[...,1], mask[...,2]))
            mskset[...] = msk
            if k == 0:
                fim_data = np.squeeze(copy.copy(d))
                gim_data = np.squeeze(copy.copy(g))
                msk_data = np.squeeze(copy.copy(msk))
```
%% Cell type:code id: tags:

``` python
def do_2d_plot(data, edges, y_axis, x_axis):
    from matplotlib.colors import LogNorm
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    extent = [np.min(edges[1]), np.max(edges[1]), np.min(edges[0]), np.max(edges[0])]
    im = ax.imshow(data[::-1, :], extent=extent, aspect="auto", norm=LogNorm(vmin=1, vmax=np.max(data)))
    ax.set_xlabel(x_axis)
    ax.set_ylabel(y_axis)
    cb = fig.colorbar(im)
    cb.set_label("Counts")
```
%% Cell type:code id: tags:

``` python
h, ex, ey = np.histogram2d(rim_data.flatten(), gim_data.flatten(),
                           bins=[100, 4], range=[[0, 10000], [0, 4]])
do_2d_plot(h, (ex, ey), "Signal (ADU)", "Gain Bit Value")
```
%% Cell type:markdown id: tags:

### Mean RAW Preview ###

The per pixel mean of the sequence file of RAW data

%% Cell type:code id: tags:

``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
im = ax.imshow(np.mean(rim_data, axis=0),
               vmin=min(0.75*np.median(rim_data[rim_data > 0]), 2000),
               vmax=max(1.5*np.median(rim_data[rim_data > 0]), 16000), cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:

### Mean CORRECTED Preview ###

The per pixel mean of the sequence file of CORR data

%% Cell type:code id: tags:

``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
im = ax.imshow(np.mean(fim_data, axis=0),
               vmin=min(0.75*np.median(fim_data[fim_data > 0]), -0.5),
               vmax=max(2.*np.median(fim_data[fim_data > 0]), 100), cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:

### Single Train Preview ###

A single image from the first train

%% Cell type:code id: tags:

``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
im = ax.imshow(fim_data[0, ...],
               vmin=min(0.75*np.median(fim_data[0, ...]), -0.5),
               vmax=max(2.*np.median(fim_data[0, ...]), 1000), cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:

## Signal Distribution ##

%% Cell type:code id: tags:

``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(211)
h = ax.hist(fim_data.flatten(), bins=1000, range=(-100, 1000), log=True)
l = ax.set_xlabel("Signal (keV)")
l = ax.set_ylabel("Counts")
ax = fig.add_subplot(212)
h = ax.hist(fim_data.flatten(), bins=1000, range=(-100, 10000), log=True)
l = ax.set_xlabel("Signal (keV)")
l = ax.set_ylabel("Counts")
```
%% Cell type:markdown id: tags:

### Maximum GAIN Preview ###

The per pixel maximum of the first train of the GAIN data

%% Cell type:code id: tags:

``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
im = ax.imshow(np.max(gim_data, axis=0), vmin=0,
               vmax=3, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:

## Bad Pixels ##

The mask contains dedicated entries for all pixels and memory cells as well as all three gain stages. Each mask entry is encoded in 32 bits as:

%% Cell type:code id: tags:

``` python
from cal_tools.enums import BadPixels
from IPython.display import HTML, display, Markdown, Latex
import tabulate

table = []
for item in BadPixels:
    table.append((item.name, "{:016b}".format(item.value)))
md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=["Bad pixel type", "Bit mask"])))
```
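Since each entry is a single bit, a given defect class can be tested with a bitwise AND; a short sketch on a hypothetical mask array:

``` python
import numpy as np
from cal_tools.enums import BadPixels

msk = np.zeros((2, 2), dtype=np.uint32)  # hypothetical 2x2 mask
msk[0, 1] |= BadPixels.VALUE_IS_NAN.value

is_nan_flagged = (msk & BadPixels.VALUE_IS_NAN.value) != 0
print(is_nan_flagged)  # [[False  True] [False False]]
```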
%% Cell type:markdown id: tags:

### Single Image Bad Pixels ###

A single image bad pixel map for the first image of the first train

%% Cell type:code id: tags:

``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
im = ax.imshow(np.log2(msk_data[0, ...]), vmin=0, vmax=32, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:code id: tags:

``` python

```

...
%% Cell type:markdown id: tags:

# Jungfrau Dark Image Characterization #

Version: 0.1, Author: M. Ramilli, S. Hauf

Analyzes Jungfrau dark image data to deduce offset, noise and resulting bad pixel maps

%% Cell type:code id: tags:

``` python
in_folder = '/gpfs/exfel/exp/SPB/201921/p002429/raw/' # folder under which runs are located, required
out_folder = '' # path to place reports at, required
sequences = 1 # number of sequence files in that run
path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # template to use for file name, double escape sequence number
path_inset = "DA06" # file inset for image data
path_inset_control = "DA06" # file inset for control data
cluster_profile = 'noDB' # the ipcluster profile name
cal_db_interface = 'tcp://max-exfl016:8016' # calibration db interface to connect to
integration_time = 1000 # integration time in us, will be overwritten by value in file
bias_voltage = 90 # sensor bias voltage in V, will be overwritten by value in file
badpixel_threshold_sigma = 20. # bad pixels defined by values outside n times this std from median
offset_abs_threshold_low = [1000, 10000, 10000] # absolute bad pixel threshold in terms of offset, lower values
offset_abs_threshold_high = [8000, 15000, 15000] # absolute bad pixel threshold in terms of offset, upper values
chunkSize = 10 # iteration chunk size, needs to match or be less than number of images in a sequence file
imageRange = [0, 500] # image range in which to evaluate
memoryCells = 1 # number of memory cells
h5path = '/INSTRUMENT/{}/DET/{}:daqOutput/data' # path in H5 file under which images are located
run_high = 0 # run number for G0 dark run, required
run_med = 0 # run number for G1 dark run, required
run_low = 0 # run number for G2 dark run, required
karabo_id = "FXE_XAD_JF500K" # karabo prefix of Jungfrau devices
karabo_id_control = "" # set if control devices are on a different karabo ID; leave empty to use the same ID as for data
receiver_id = "RECEIVER" # inset for receiver devices
control_id = "CONTROL" # inset for control devices
db_module = "Jungfrau_M233" # ID of module in calibration database
use_dir_creation_date = True # use dir creation date
manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values
```
%% Cell type:code id: tags:

``` python
import warnings
warnings.filterwarnings('ignore')

import matplotlib
matplotlib.use('agg')
import numpy as np
import XFELDetAna.xfelpyanatools as xana
import XFELDetAna.xfelpycaltools as xcal
from cal_tools.enums import BadPixels
from cal_tools.tools import get_dir_creation_date
from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions
from XFELDetAna.util import env
env.iprofile = cluster_profile
from XFELDetAna.detectors.jungfrau import readerPSI as jfreaderPSI
from XFELDetAna.detectors.jungfrau import reader as jfreader
from XFELDetAna.detectors.jungfrau.jf_chunk_reader import JFChunkReader
from XFELDetAna.detectors.jungfrau.util import non_empty_trains

# some constants relevant for the analysis
run_nums = [run_high, run_med, run_low] # run numbers for G0/HG0, G1, G2
cpuCores = 1
is_parallel = False
sensorSize = [1024, 512]
blockSize = [1024, 512]
xRange = [0, 0+sensorSize[0]]
yRange = [0, 0+sensorSize[1]]
gains = [0, 1, 2]

h5path = h5path.format(karabo_id, receiver_id)

creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run_high)
    print("Using {} as creation time".format(creation_time))

offset_abs_threshold = [offset_abs_threshold_low, offset_abs_threshold_high]

if karabo_id_control == "":
    karabo_id_control = karabo_id
```
%% Cell type:code id: tags:

``` python
noiseCal = xcal.NoiseCalculator(sensorSize, nCells=memoryCells, cores=cpuCores, parallel=is_parallel, gains=gains,
                                blockSize=blockSize)
```
%% Cell type:code id: tags:

``` python
import h5py
for i_run, run in enumerate(run_nums):
    if run is not None:
        ped_dir = "{}/r{:04d}".format(in_folder, run)
        fp_name = path_template.format(run, path_inset_control)
        fp_path = '{}/{}'.format(ped_dir, fp_name)

        if not manual_slow_data:
            with h5py.File(fp_path.format(0), 'r') as f:
                integration_time = int(f['/RUN/{}/DET/{}/exposureTime/value'.format(karabo_id_control, control_id)][()]*1e6)
                bias_voltage = int(np.squeeze(f['/RUN/{}/DET/{}/vHighVoltage/value'.format(karabo_id_control, control_id)])[0])
            print("Integration time is {} us".format(integration_time))
            print("Bias voltage is {} V".format(bias_voltage))

        fp_name = path_template.format(run, path_inset)
        fp_path = '{}/{}'.format(ped_dir, fp_name)
        filep_size = 500
        myRange_P = range(0, sequences)
        path = h5path

        print("Reading data from {}".format(fp_path))
        print("Run is: {}".format(run))
        print("HDF5 path: {}".format(h5path))

        reader = JFChunkReader(filename=fp_path, readFun=jfreader.readData, size=filep_size, chunkSize=chunkSize,
                               path=path, image_range=imageRange, pixels_x=sensorSize[0], pixels_y=sensorSize[1],
                               x_range=xRange, y_range=yRange, imagesPerChunk=chunkSize, filesRange=myRange_P,
                               memoryCells=memoryCells, blockSize=blockSize)
        for data in reader.readChunks():
            images = np.squeeze(data[0])
            gainmaps = np.squeeze(data[1])
            trainId = np.array(data[2])
            idxs = non_empty_trains(trainId)
            noiseCal.fill(data=images[..., idxs], gainMap=gainmaps[..., idxs])
    else:
        print("dark run for G{} is missing".format(i_run))

offset_map = noiseCal.getOffset()
noise_map = noiseCal.get()
```
%% Cell type:markdown id: tags:

## Offset and Noise Maps ##

Below, offset and noise maps for the high ($g_0$) gain stage are shown, alongside the distribution of these values. One expects block-like structures mapping to the ASICs of the detector.
%% Cell type:code id: tags:

``` python
import matplotlib.pyplot as plt
%matplotlib inline
from XFELDetAna.plotting.histogram import histPlot
from XFELDetAna.plotting.heatmap import heatmapPlot

g_name = ['G0', 'G1', 'G2']
g_range = [(0, 8000), (8000, 16000), (8000, 16000)]

for g_idx in gains:
    off = offset_map[:, :, 0, g_idx]
    fo_0 = heatmapPlot(np.swapaxes(off, 0, 1),
                       y_label="Row",
                       x_label="Column",
                       lut_label="Pedestal {} [ADCu]".format(g_name[g_idx]),
                       vmin=g_range[g_idx][0],
                       vmax=g_range[g_idx][1])

    fo_01, axo_01 = plt.subplots()
    ho0, eo0, xo0, stato0 = histPlot(axo_01, off,
                                     bins=800,
                                     range=g_range[g_idx],
                                     facecolor='b',
                                     histotype='stepfilled')
    axo_01.tick_params(axis='both', which='major', labelsize=15)
    axo_01.set_title('Module pedestal distribution', fontsize=15)
    axo_01.set_xlabel('Pedestal {} [ADCu]'.format(g_name[g_idx]), fontsize=15)
    axo_01.set_yscale('log')

    noise = noise_map[:, :, 0, g_idx]
    fn_0 = heatmapPlot(np.swapaxes(noise, 0, 1),
                       y_label="Row",
                       x_label="Column",
                       lut_label="RMS noise {} [ADCu]".format(g_name[g_idx]),
                       vmin=0,
                       vmax=50)

    fn_01, axn_01 = plt.subplots()
    hn0, en0, xn0, statn0 = histPlot(axn_01, noise,
                                     bins=100,
                                     range=(0, 50),
                                     facecolor='b',
                                     histotype='stepfilled')
    axn_01.tick_params(axis='both', which='major', labelsize=15)
    axn_01.set_title('Module noise distribution', fontsize=15)
    axn_01.set_xlabel('RMS noise {} [ADCu]'.format(g_name[g_idx]), fontsize=15)
    axn_01.set_yscale('log')

plt.show()
```
%% Cell type:markdown id: tags:

## Bad Pixel Map ##

The bad pixel map is deduced by comparing offset and noise of each pixel ($v_i$) and each gain ($g$) against the median value for that gain stage:

$$
v_i > \mathrm{median}(v_{k,g}) + n \sigma_{v_{k,g}}
$$

or

$$
v_i < \mathrm{median}(v_{k,g}) - n \sigma_{v_{k,g}}
$$

Values are encoded in a 32 bit mask, where for the dark-image-deduced bad pixels the following non-zero entries are relevant:
%% Cell type:code id: tags:

``` python
def print_bp_entry(bp):
    print("{:<30s} {:032b}".format(bp.name, bp.value))

print_bp_entry(BadPixels.OFFSET_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.NOISE_OUT_OF_THRESHOLD)
print_bp_entry(BadPixels.OFFSET_NOISE_EVAL_ERROR)
```
%% Cell type:code id: tags:

``` python
bad_pixels_map = np.zeros(noise_map.shape, np.uint32)

def eval_bpidx(d):
    mdn = np.nanmedian(d, axis=(0, 1, 2))[None, None, None, :]
    std = np.nanstd(d, axis=(0, 1, 2))[None, None, None, :]
    idx = (d > badpixel_threshold_sigma*std+mdn) | (d < (-badpixel_threshold_sigma)*std+mdn)
    return idx

offset_abs_threshold = np.array(offset_abs_threshold)

bad_pixels_map[eval_bpidx(offset_map)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value
bad_pixels_map[~np.isfinite(offset_map)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
bad_pixels_map[eval_bpidx(noise_map)] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value
bad_pixels_map[~np.isfinite(noise_map)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value
bad_pixels_map[(offset_map < offset_abs_threshold[0][None, None, None, :]) | (offset_map > offset_abs_threshold[1][None, None, None, :])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value

for g_idx in gains:
    bad_pixels = bad_pixels_map[:, :, 0, g_idx]
    fn_0 = heatmapPlot(np.swapaxes(bad_pixels, 0, 1),
                       y_label="Row",
                       x_label="Column",
                       lut_label="Badpixels {} [ADCu]".format(g_name[g_idx]),
                       vmin=0)
```
%% Cell type:code id: tags:

``` python
## offset
metadata = ConstantMetaData()
offset = Constants.jungfrau.Offset()
offset.data = np.moveaxis(offset_map, 0, 1)
metadata.calibration_constant = offset

# set the operating condition
condition = Conditions.Dark.jungfrau(memory_cells=1, bias_voltage=bias_voltage,
                                     integration_time=integration_time)
device = getattr(Detectors, db_module)
metadata.detector_condition = condition

# specify a version for this constant
if creation_time is None:
    metadata.calibration_constant_version = Versions.Now(device=device)
    metadata.retrieve(cal_db_interface)
else:
    metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                              start=creation_time)
    metadata.send(cal_db_interface)

## noise
metadata = ConstantMetaData()
noise = Constants.jungfrau.Noise()
noise.data = np.moveaxis(noise_map, 0, 1)
metadata.calibration_constant = noise

# set the operating condition
condition = Conditions.Dark.jungfrau(memory_cells=1, bias_voltage=bias_voltage,
                                     integration_time=integration_time)
metadata.detector_condition = condition

# specify a version for this constant
if creation_time is None:
    metadata.calibration_constant_version = Versions.Now(device=device)
    metadata.retrieve(cal_db_interface)
else:
    metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                              start=creation_time)
    metadata.send(cal_db_interface)

## bad pixels
metadata = ConstantMetaData()
bpix = Constants.jungfrau.BadPixelsDark()
bpix.data = np.moveaxis(bad_pixels_map, 0, 1)
metadata.calibration_constant = bpix

# set the operating condition
condition = Conditions.Dark.jungfrau(memory_cells=1, bias_voltage=bias_voltage,
                                     integration_time=integration_time)
metadata.detector_condition = condition

# specify a version for this constant
if creation_time is None:
    metadata.calibration_constant_version = Versions.Now(device=device)
    metadata.retrieve(cal_db_interface)
else:
    metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                              start=creation_time)
    metadata.send(cal_db_interface)
```
%% Cell type:markdown id: tags:

## Verification ##

For verification we correct the input data with the offset deduced from it. We expect a distribution centered around zero.
%% Cell type:code id: tags:

``` python
fd_path = fp_path
d_path = path
filed_size = 500
myRange_D = range(0, 2)
imageRange = [0, 500]
chunkSize = 25

reader = JFChunkReader(filename=fd_path, readFun=jfreader.readData, size=filed_size, chunkSize=chunkSize,
                       path=d_path, image_range=imageRange, pixels_x=sensorSize[0], pixels_y=sensorSize[1],
                       x_range=xRange, y_range=yRange, imagesPerChunk=chunkSize, filesRange=myRange_D,
                       memoryCells=memoryCells, blockSize=blockSize)

offsetCorr = xcal.OffsetCorrection(shape=sensorSize, offsetMap=offset_map, nCells=memoryCells,
                                   cores=cpuCores, parallel=is_parallel, gains=gains, blockSize=blockSize)

pixel_data = []
for data_raw in reader.readChunks():
    images = np.array(data_raw[0]).astype(np.float32)
    gainmaps = np.array(data_raw[1])
    trainId = np.array(data_raw[2])
    idxs = non_empty_trains(trainId)
    data_out = offsetCorr.correct(data=images[..., idxs], gainMap=gainmaps[..., idxs])
    pixel_data.append(data_out)
pixel_data = np.array(pixel_data)
```
%% Cell type:code id: tags:

``` python
is_log = True
h_range = (-500, 500)
h_bins = 300

f_sp, ax_sp = plt.subplots()
h, e, x, stat = histPlot(ax_sp, pixel_data.flatten(),
                         bins=h_bins,
                         range=h_range,
                         facecolor='b',
                         log=is_log,
                         histotype='stepfilled')
ax_sp.tick_params(axis='both', which='major', labelsize=15)
ax_sp.set_xlabel('Energy [ADCu]', fontsize=15)
plt.show()
```
...