Commit cd4f1ed7 authored by Karim Ahmed

Merge branch 'fix/int_hist_pulses' into 'master'

int conversion directly at hist_pulse

See merge request detectors/pycalibration!248
parents f8a2bd22 74e16575
@@ -125,8 +125,8 @@ class AgipdCorrections:
         # avoid list(range(*[0]]))
         self.pulses_lst = list(range(*max_pulses)) \
             if not (len(max_pulses) == 1 and max_pulses[0] == 0) else max_pulses  #noqa
-        self.min_pulse = int(self.pulses_lst[0])
-        self.max_pulse = int(self.pulses_lst[-1])
+        self.min_pulse = self.pulses_lst[0]
+        self.max_pulse = self.pulses_lst[-1]
         self.max_cells = max_cells
         self.hist_pulses = 0
         self.hists_signal_low = 0
@@ -1047,10 +1047,11 @@ class AgipdCorrections:
             copim[copim < self.median_noise] = np.nan

             # avoid 0 hist_pulses, otherwise histogram plot will fail
+            # hist_pulse must be of a type(int)
             if self.max_pulse == 0:
-                self.hist_pulses = self.max_pulse + 1
+                self.hist_pulses = int(self.max_pulse + 1)
             else:
-                self.hist_pulses = self.max_pulse
+                self.hist_pulses = int(self.max_pulse)

             bins = (self.bins_signal_low_range, self.hist_pulses)
             rnge = [[-50, 1000], [self.min_pulse,
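The cast matters because `hist_pulses` is later used as a histogram dimension (the notebook below allocates `np.zeros((bins_signal_low_range, hist_pulses), np.float64)`), and NumPy does not accept a float as an array dimension. A minimal sketch with plain NumPy and made-up values:

``` python
import numpy as np

bins_signal_low_range = 100
hist_pulses = 250.0  # hypothetical: a float that slipped through the max_pulses arithmetic

try:
    np.zeros((bins_signal_low_range, hist_pulses), np.float64)
except TypeError as e:
    # NumPy rejects non-integer array dimensions (older versions only warned)
    print("float bin count rejected:", e)

# casting as done in this commit makes the allocation valid
hists = np.zeros((bins_signal_low_range, int(hist_pulses)), np.float64)
print(hists.shape)  # (100, 250)
```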
%% Cell type:markdown id: tags:
# AGIPD Offline Correction #
Author: European XFEL Detector Group, Version: 1.0
Offline Calibration for the AGIPD Detector
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/MID/201931/p900107/raw" # the folder to read data from, required
run = 11 # run to process, required
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/AGIPD_Corr" # the folder to output to, required
calfile = "/gpfs/exfel/data/scratch/haufs/agipd_on_demand/agipd_store_mid.h5" # path to calibration file. Leave empty if all data should come from DB
sequences = [-1] # sequences to correct, set to -1 for all, range allowed
mem_cells = 0 # number of memory cells used, set to 0 to automatically infer
interlaced = False # whether data is in interlaced layout
overwrite = True # set to True if existing data should be overwritten
relative_gain = False # do relative gain correction
cluster_profile = "noDB"
max_pulses = [0, 500, 1] # [start, end, step] list defining the range of pulse indices to correct; at most 3 elements are allowed.
local_input = False
bias_voltage = 300
cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use
use_dir_creation_date = True # use the creation date of the input dir for database queries
sequences_per_node = 2 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel
photon_energy = 9.2 # photon energy in keV
index_v = 2 # version of RAW index type
nodb = False # if set only file-based constants will be used
blc_noise_threshold = 5000 # above this mean signal intensity, no baseline correction via noise is attempted
blc_hist = False # if set, base line correction via histogram matching is attempted
corr_asic_diag = False # if set, diagonal drop offs on ASICs are corrected
melt_snow = "" # if set to "none" snowy pixels are identified and resolved to NaN, if set to "interpolate", the value is interpolated from neighbouring pixels
cal_db_timeout = 30000 # in milliseconds
max_cells_db_dark = 0 # set to a value different than 0 to use this value for dark data DB queries
max_cells_db = 0 # set to a value different than 0 to use this value for DB queries
chunk_size_idim = 1 # chunking size of imaging dimension, adjust if user software is sensitive to this.
creation_date_offset = "00:00:00" # add an offset to creation date, e.g. to get different constants
instrument = "MID" # the instrument the detector is installed at, required
force_hg_if_below = 1000 # set to a value other than 0 to force a pixel into high gain if its high-gain offset-subtracted value is below this threshold
force_mg_if_below = 1000 # set to a value other than 0 to force a pixel into medium gain if its medium-gain offset-subtracted value is below this threshold
mask_noisy_adc = 0.25 # set to a value other than 0 and below 1 to mask an entire ADC if the fraction of noisy pixels exceeds this value
acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine
# Correction Booleans
only_offset = False # Apply only Offset correction.
pc_corr = False # Apply only Pulse Capacitor correction.
ff_corr = False # Apply only Flat Field correction.
blc_noise = False # if set, baseline correction via noise peak location is attempted
match_asics = False # if set, inner ASIC borders are matched to the same signal level
adjust_mg_baseline = False # adjust medium gain baseline to match highest high gain value
dont_zero_nans = False # do not zero NaN values in corrected data
dont_zero_orange = False # do not zero very negative and very large values
def balance_sequences(in_folder, run, sequences, sequences_per_node):
    import glob
    import re
    import numpy as np
    if sequences[0] == -1:
        sequence_files = glob.glob("{}/r{:04d}/*-S*.h5".format(in_folder, run))
        seq_nums = set()
        for sf in sequence_files:
            seqnum = re.findall(r".*-S([0-9]*).h5", sf)[0]
            seq_nums.add(int(seqnum))
        seq_nums -= set(sequences)
    else:
        seq_nums = set(sequences)
    nsplits = len(seq_nums)//sequences_per_node + 1
    while nsplits > 32:
        sequences_per_node += 1
        nsplits = len(seq_nums)//sequences_per_node + 1
        print("Changed to {} sequences per node to have a maximum of 32 concurrent jobs".format(sequences_per_node))
    return [l.tolist() for l in np.array_split(list(seq_nums), nsplits) if l.size > 0]
```
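As an illustration of how `balance_sequences` distributes work, the sketch below uses a made-up, explicit sequence list (so the `glob` over `in_folder` is skipped) and two sequences per node; each returned chunk corresponds to one SLURM job:

``` python
# Hypothetical example: eight explicitly requested sequences, two per node.
# nsplits = len(seq_nums)//sequences_per_node + 1 = 5, so np.array_split yields
# five chunks and any empty ones are filtered out.
chunks = balance_sequences(in_folder, run, sequences=[0, 1, 2, 3, 4, 5, 6, 7],
                           sequences_per_node=2)
print(chunks)  # e.g. [[0, 1], [2, 3], [4, 5], [6], [7]]
```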
%% Cell type:code id: tags:
``` python
# Fill dictionaries comprising bools and arguments for correction and data analysis
# Here the hierarchy and dependencies of the correction booleans are defined
corr_bools = {}
# offset is at the bottom of the AGIPD correction pyramid.
corr_bools["only_offset"] = only_offset
# Don't apply any other correction if only_offset is requested
if not only_offset:
    # Apply PC correction only if requested
    if pc_corr:
        corr_bools["pc_corr"] = pc_corr
    # Apply FF correction only if requested
    if ff_corr:
        corr_bools["ff_corr"] = ff_corr
    corr_bools["adjust_mg_baseline"] = adjust_mg_baseline
    corr_bools["do_rel_gain"] = relative_gain
    corr_bools["blc_noise"] = blc_noise
    corr_bools["match_asics"] = match_asics
    corr_bools["corr_asic_diag"] = corr_asic_diag
    corr_bools["dont_zero_nans"] = dont_zero_nans
    corr_bools["dont_zero_orange"] = dont_zero_orange
# Here the hierarchy and dependencies of the data-analysis booleans and arguments are defined
data_analysis_parms = {}
```
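Since `pc_corr` and `ff_corr` are only inserted into `corr_bools` when requested, downstream code that consumes the dictionary should look such flags up with a default; a hypothetical sketch:

``` python
# Hypothetical downstream use: flags gated by only_offset may be absent,
# so look them up with a default instead of indexing directly.
apply_pc = corr_bools.get("pc_corr", False)
apply_ff = corr_bools.get("ff_corr", False)
print("PC correction:", apply_pc, "/ FF correction:", apply_ff)
```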
%% Cell type:code id: tags:
``` python
import sys
from collections import OrderedDict
# make sure a cluster is running with ipcluster start --n=32, give it a while to start
import os
import h5py
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from ipyparallel import Client
print("Connecting to profile {}".format(cluster_profile))
view = Client(profile=cluster_profile)[:]
view.use_dill()
from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions
from cal_tools.tools import (gain_map_files, parse_runs, run_prop_seq_from_path, get_notebook_name,
get_dir_creation_date, get_constant_from_db)
from dateutil import parser
from datetime import timedelta
il_mode = interlaced
max_cells = mem_cells
gains = np.arange(3)
cells = np.arange(max_cells)
creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run)
    offset = parser.parse(creation_date_offset)
    delta = timedelta(hours=offset.hour, minutes=offset.minute, seconds=offset.second)
    creation_time += delta
    print("Using {} as creation time".format(creation_time))
in_folder = "{}/r{:04d}".format(in_folder, run)
print("Working in IL Mode: {}. Actual cells in use are: {}".format(il_mode, max_cells))
if sequences[0] == -1:
    sequences = None
QUADRANTS = 4
MODULES_PER_QUAD = 4
DET_FILE_INSET = "AGIPD"
CHUNK_SIZE = 250
MAX_PAR = 32
if in_folder[-1] == "/":
    in_folder = in_folder[:-1]
print("Outputting to {}".format(out_folder))
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
elif not overwrite:
    raise AttributeError("Output path exists! Exiting")
import warnings
warnings.filterwarnings('ignore')
from cal_tools.agipdlib import SnowResolution
melt_snow = False if melt_snow == "" else SnowResolution(melt_snow)
special_opts = blc_noise_threshold, blc_hist, melt_snow
loc = None
if instrument == "SPB":
loc = "SPB_DET_AGIPD1M-1"
dinstance = "AGIPD1M1"
else:
loc = "MID_DET_AGIPD1M-1"
dinstance = "AGIPD1M2"
print("Detector in use is {}".format(loc))
```
%% Cell type:code id: tags:
``` python
def combine_stack(d, sdim):
    combined = np.zeros((sdim, 2048, 2048))
    combined[...] = np.nan
    dy = 0
    for i in range(16):
        try:
            if i < 8:
                dx = -512
                if i > 3:
                    dx -= 25
                mx = 1
                my = i % 8
                combined[:, my*128+dy:(my+1)*128+dy,
                         mx*512-dx:(mx+1)*512-dx] = np.rollaxis(d[i], 2, 1)[:, :, ::-1]
                dy += 30
                if i == 3:
                    dy += 30
            elif i < 12:
                dx = 80 - 50
                if i == 8:
                    dy = 4*30 + 30 + 50 - 30
                mx = 1
                my = i % 8 + 4
                combined[:, my*128+dy:(my+1)*128+dy,
                         mx*512-dx:(mx+1)*512-dx] = np.rollaxis(d[i], 2, 1)[:, ::-1, :]
                dy += 30
            else:
                dx = 100 - 50
                if i == 11:
                    dy = 20
                mx = 1
                my = i - 14
                combined[:, my*128+dy:(my+1)*128+dy,
                         mx*512-dx:(mx+1)*512-dx] = np.rollaxis(d[i], 2, 1)[:, ::-1, :]
                dy += 30
        except:
            continue
    return combined
```
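A quick sanity check of the assembly geometry with synthetic data (not part of the original notebook): `combine_stack` expects 16 module stacks of shape `(n, 512, 128)` and fills a single `(n, 2048, 2048)` canvas that stays NaN outside the module footprints:

``` python
# Synthetic input: 16 modules, 4 frames each, filled with the module index.
fake_modules = [np.full((4, 512, 128), float(m)) for m in range(16)]
assembled = combine_stack(fake_modules, 4)
print(assembled.shape)                # (4, 2048, 2048)
print(np.isnan(assembled[0]).mean())  # fraction of the canvas left empty
```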
%% Cell type:code id: tags:
``` python
# set everything up filewise
from queue import Queue
from collections import OrderedDict
def map_modules_from_files(filelist):
    module_files = OrderedDict()
    mod_ids = OrderedDict()
    total_sequences = 0
    sequences_qm = {}
    one_module = None
    for quadrant in range(0, QUADRANTS):
        for module in range(0, MODULES_PER_QUAD):
            name = "Q{}M{}".format(quadrant + 1, module + 1)
            module_files[name] = Queue()
            num = quadrant * 4 + module
            mod_ids[name] = num
            file_infix = "{}{:02d}".format(DET_FILE_INSET, num)
            sequences_qm[name] = 0
            for file in filelist:
                if file_infix in file:
                    if not one_module:
                        one_module = file, num
                    module_files[name].put(file)
                    total_sequences += 1
                    sequences_qm[name] += 1
    return module_files, mod_ids, total_sequences, sequences_qm, one_module

dirlist = sorted(os.listdir(in_folder))
file_list = []
for entry in dirlist:
    # only h5 files
    abs_entry = "{}/{}".format(in_folder, entry)
    if os.path.isfile(abs_entry) and os.path.splitext(abs_entry)[1] == ".h5":
        if sequences is None:
            file_list.append(abs_entry)
        else:
            for seq in sequences:
                if "{:05d}.h5".format(seq) in abs_entry:
                    file_list.append(os.path.abspath(abs_entry))
mapped_files, mod_ids, total_sequences, sequences_qm, one_module = map_modules_from_files(file_list)
MAX_PAR = min(MAX_PAR, total_sequences)
```
%% Cell type:markdown id: tags:
## Processed Files ##
%% Cell type:code id: tags:
``` python
import copy
from IPython.display import HTML, display, Markdown, Latex
import tabulate
print("Processing a total of {} sequence files in chunks of {}".format(total_sequences, MAX_PAR))
table = []
mfc = copy.copy(mapped_files)
ti = 0
for k, files in mfc.items():
    i = 0
    while not files.empty():
        f = files.get()
        if i == 0:
            table.append((ti, k, i, f))
        else:
            table.append((ti, "", i, f))
        i += 1
        ti += 1
md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=["#", "module", "# module", "file"])))
# restore the queue
mapped_files, mod_ids, total_sequences, sequences_qm, one_module = map_modules_from_files(file_list)
```
%% Cell type:code id: tags:
``` python
import copy
from functools import partial
def correct_module(max_cells, index_v, CHUNK_SIZE, total_sequences, sequences_qm,
                   bins_gain_vs_signal, bins_signal_low_range, bins_signal_high_range,
                   bins_dig_gain_vs_signal, max_pulses, dbparms, fileparms, nodb, chunk_size_idim,
                   special_opts, il_mode, loc, dinstance, force_hg_if_below, force_mg_if_below,
                   mask_noisy_adc, acq_rate, corr_bools, inp):
    print("foo")
    import numpy as np
    import copy
    import h5py
    import socket
    from datetime import datetime
    import re
    import os
    from influxdb import InfluxDBClient
    from cal_tools.enums import BadPixels
    from cal_tools.agipdlib import AgipdCorrections, SnowResolution
    from cal_tools.agipdlib import get_num_cells, get_acq_rate
    #client = InfluxDBClient('exflqr18318', 8086, 'root', 'root', 'calstats')
    def create_influx_entry(run, proposal, qm, sequence, filesize, chunksize,
                            total_sequences, success, runtime, reason=""):
        return {
            "measurement": "run_correction",
            "tags": {
                "host": socket.gethostname(),
                "run": run,
                "proposal": proposal,
                "mem_cells": max_cells,
                "sequence": sequence,
                "module": qm,
                "filesize": filesize,
                "chunksize": chunksize,
                "total_sequences": total_sequences,
                "sequences_module": sequences_qm[qm],
                "detector": "AGIPD",
                "instrument": "SPB",
            },
            "time": datetime.utcnow().isoformat(),
            "fields": {
                "success": success,
                "reason": reason,
                "runtime": runtime,
            }
        }
    hists_signal_low = None
    hists_signal_high = None
    hists_gain_vs_signal = None
    hists_dig_gain_vs_signal = None
    hist_pulses = None
    low_edges = None
    high_edges = None
    signal_edges = None
    dig_signal_edges = None
    gain_stats = 0
    when = None
    err = None
    try:
        start = datetime.now()
        success = True
        reason = ""
        filename, filename_out, channel, qm = inp
        print("Have input")
        if max_cells == 0:
            max_cells = get_num_cells(filename, loc, channel)
            if max_cells is None:
                raise ValueError("No raw images found for {}".format(qm))
            else:
                cells = np.arange(max_cells)
        if acq_rate == 0.:
            acq_rate = get_acq_rate(filename, loc, channel)
        else:
            acq_rate = None
        if dbparms[2] == 0:
            dbparms[2] = max_cells
        if dbparms[5] == 0:
            dbparms[5] = dbparms[2]
        print("Set memory cells to {}".format(max_cells))
        print("Set acquisition rate to {} MHz".format(acq_rate))
        infile = h5py.File(filename, "r", driver="core")
        outfile = h5py.File(filename_out, "w")
        try:
            agipd_corr = AgipdCorrections(infile, outfile, max_cells, channel, max_pulses,
                                          bins_gain_vs_signal, bins_signal_low_range,
                                          bins_signal_high_range, bins_dig_gain_vs_signal,
                                          chunk_size_idim=chunk_size_idim,
                                          il_mode=il_mode, raw_fmt_version=index_v,
                                          h5_data_path="INSTRUMENT/{}/DET/{{}}CH0:xtdf/".format(loc),
                                          h5_index_path="INDEX/{}/DET/{{}}CH0:xtdf/".format(loc),
                                          cal_det_instance=dinstance, force_hg_if_below=force_hg_if_below,
                                          force_mg_if_below=force_mg_if_below, mask_noisy_adc=mask_noisy_adc,
                                          acquisition_rate=acq_rate, corr_bools=corr_bools)
            blc_noise_threshold, blc_hist, melt_snow = special_opts
            if not corr_bools["only_offset"]:
                blc_hist = False
                melt_snow = False
            agipd_corr.baseline_corr_noise_threshold = blc_noise_threshold
            agipd_corr.baseline_corr_using_hmatch = blc_hist
            agipd_corr.melt_snow = melt_snow
            try:
                agipd_corr.get_valid_image_idx()
            except IOError:
                return
            if not nodb:
                when = agipd_corr.initialize_from_db(dbparms, qm, only_dark=(fileparms != ""))
            if fileparms != "" and not corr_bools["only_offset"]:
                agipd_corr.initialize_from_file(fileparms, qm, with_dark=nodb)
            print("Initialized constants")
            for irange in agipd_corr.get_iteration_range():
                agipd_corr.correct_agipd(irange)
                print("Iterated")
            print("All iterations are finished")
            hists, edges = agipd_corr.get_histograms()
            hists_signal_low, hists_signal_high, hists_gain_vs_signal, hists_dig_gain_vs_signal, hist_pulses = hists
            low_edges, high_edges, signal_edges, dig_signal_edges = edges
            gain_stats = np.array(agipd_corr.gain_stats)
        finally:
            outfile.close()
            infile.close()
            print("Closed files")
    except Exception as e:
        print(e)
        err = e
        success = False
        reason = "Error"
    finally:
        run = re.findall(r'.*r([0-9]{4}).*', filename)[0]
        proposal = re.findall(r'.*p([0-9]{6}).*', filename)[0]
        sequence = re.findall(r'.*S([0-9]{5}).*', filename)[0]
        filesize = os.path.getsize(filename)
        duration = (datetime.now()-start).total_seconds()
        #influx = create_influx_entry(run, proposal, qm, sequence, filesize, CHUNK_SIZE, total_sequences, success, duration, reason)
        #client.write_points([influx])
        return (hists_signal_low, hists_signal_high, hists_gain_vs_signal, hists_dig_gain_vs_signal, hist_pulses,
                low_edges, high_edges, signal_edges, dig_signal_edges, gain_stats, max_cells, when, qm, err)
done = False
first_files = []
inp = []
left = total_sequences
# Display information about the selected pulse indices for correction.
pulses_lst = list(range(*max_pulses)) if not (len(max_pulses)==1 and max_pulses[0]==0) else max_pulses
try:
    if len(pulses_lst) > 1:
        print("A range of {} pulse indices is selected: from {} to {} with a step of {}"
              .format(len(pulses_lst), pulses_lst[0], pulses_lst[-1] + (pulses_lst[1] - pulses_lst[0]),
                      pulses_lst[1] - pulses_lst[0]))
    else:
        print("One pulse is selected: pulse index {}".format(pulses_lst[0]))
except Exception as e:
    raise ValueError('max_pulses input Error: {}'.format(e))
bins_gain_vs_signal = (100, 100)
bins_signal_low_range = 100
bins_signal_high_range = 100
bins_dig_gain_vs_signal = (100, 4)
hists_gain_vs_signal = np.zeros((bins_gain_vs_signal), np.float64)
hists_dig_gain_vs_signal = np.zeros((bins_dig_gain_vs_signal), np.float64)
gain_stats = 0
low_edges, high_edges, signal_edges, dig_signal_edges = None, None, None, None
dbparms = [cal_db_interface, creation_time, max_cells_db, bias_voltage,
photon_energy, max_cells_db_dark, cal_db_timeout]
fileparms = calfile
all_cells = []
whens = []
errors = []
while not done:
    dones = []
    first = True
    for i in range(16):
        qm = "Q{}M{}".format(i//4 + 1, i % 4 + 1)
        if qm in mapped_files and not mapped_files[qm].empty():
            fname_in = str(mapped_files[qm].get())
            dones.append(mapped_files[qm].empty())
        else:
            print("Skipping {}".format(qm))
            first_files.append((None, None))
            continue
        fout = os.path.abspath("{}/{}".format(out_folder, (os.path.split(fname_in)[-1]).replace("RAW", "CORR")))
        if first:
            first_files.append((fname_in, fout))
        inp.append((fname_in, fout, i, qm))
    first = False
    if len(inp) >= min(MAX_PAR, left):
        print("Running {} tasks in parallel".format(len(inp)))
        p = partial(correct_module, max_cells, index_v, CHUNK_SIZE, total_sequences,
                    sequences_qm, bins_gain_vs_signal, bins_signal_low_range, bins_signal_high_range,
                    bins_dig_gain_vs_signal, max_pulses, dbparms, fileparms, nodb, chunk_size_idim,
                    special_opts, il_mode, loc, dinstance, force_hg_if_below, force_mg_if_below,
                    mask_noisy_adc, acq_rate, corr_bools)
        r = view.map_sync(p, inp)
        #r = list(map(p, inp))
        inp = []
        left -= MAX_PAR
        init_hist = False
        for rr in r:
            if rr is not None:
                hl, hh, hg, hdg, hp, low_edges, high_edges, signal_edges, dig_signal_edges, gs, cells, when, qm, err = rr
                all_cells.append(cells)
                whens.append((qm, when))
                errors.append(err)
                # Validate hp to be an int, not None.
                if not init_hist and hp is not None:
                    hists_signal_low = np.zeros((bins_signal_low_range, hp), np.float64)
                    hists_signal_high = np.zeros((bins_signal_low_range, hp), np.float64)
                    init_hist = True
                if hl is not None:  # any one being None will also make the others None
                    hists_signal_low += hl.astype(np.float64)
                    hists_signal_high += hh.astype(np.float64)
                    hists_gain_vs_signal += hg.astype(np.float64)
                    hists_dig_gain_vs_signal += hdg.astype(np.float64)
                gain_stats += gs
    done = all(dones)
```
%% Cell type:code id: tags:
``` python
print("Constants were injected on: ")
to_store = []
for i, (qm, when) in enumerate(whens):
    print(qm)
    line = [qm]
    # If the correction crashed
    if errors[i] is not None:
        print("Error: {}".format(errors[i]))
    else:
        for key, item in when.items():
            if hasattr(item, 'strftime'):
                item = item.strftime('%y-%m-%d %H:%M')
            when[key] = item
            print('{:.<12s}'.format(key), item)
    # Store a few timestamps if they exist
    # Add NA to keep the array structure
    for key in ['offset', 'slopesPC', 'slopesFF']:
        if when and key in when:
            line.append(when[key])
        else:
            if errors[i] is not None:
                line.append('Err')
            else:
                line.append('NA')
    to_store.append(line)
np.savetxt("{}/tmp_const_beginat_S{:05d}.csv".format(out_folder, sequences[0]),
np.array(to_store).astype(str), fmt='%s', delimiter = ',')
```
%% Cell type:code id: tags:
``` python
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
%matplotlib inline
def do_3d_plot(data, edges, x_axis, y_axis):
    fig = plt.figure(figsize=(10,10))
    ax = fig.gca(projection='3d')
    # Make data.
    X = edges[0][:-1]
    Y = edges[1][:-1]
    X, Y = np.meshgrid(X, Y)
    Z = data.T
    # Plot the surface.
    surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    ax.set_xlabel(x_axis)
    ax.set_ylabel(y_axis)
    ax.set_zlabel("Counts")
```
%% Cell type:markdown id: tags:
## Signal vs. Analogue Gain ##
The following plot shows signal vs. analogue gain for the first 128 images.
%% Cell type:code id: tags:
``` python
if signal_edges is not None:
    do_3d_plot(hists_gain_vs_signal, signal_edges, "Signal (ADU)", "Analogue gain (ADU)")
```
%% Cell type:code id: tags:
``` python
def do_2d_plot(data, edges, y_axis, x_axis):
    from matplotlib.colors import LogNorm
    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(111)
    extent = [np.min(edges[1]), np.max(edges[1]), np.min(edges[0]), np.max(edges[0])]
    im = ax.imshow(data[::-1,:], extent=extent, aspect="auto", norm=LogNorm(vmin=1, vmax=np.max(data)))
    ax.set_xlabel(x_axis)
    ax.set_ylabel(y_axis)
    cb = fig.colorbar(im)
    cb.set_label("Counts")

if signal_edges is not None:
    do_2d_plot(hists_gain_vs_signal, signal_edges, "Signal (ADU)", "Gain Value (ADU)")
```
%% Cell type:markdown id: tags:
## Signal vs. Digitized Gain ##
The following plot shows signal vs. digitized gain for the first 128 images.
%% Cell type:code id: tags:
``` python
if dig_signal_edges is not None:
    do_2d_plot(hists_dig_gain_vs_signal, dig_signal_edges, "Signal (ADU)", "Gain Bit Value")
```
%% Cell type:code id: tags:
``` python
fig, ax = plt.subplots()
if gain_stats != 0:
    ax.pie(gain_stats, labels=["high", "medium", "low"], autopct='%1.2f%%',
           shadow=True, startangle=27)
    a = ax.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
```
%% Cell type:markdown id: tags:
## Mean Intensity per Pulse ##
The following plots show the mean signal for each pulse in a detailed and expanded intensity region.
%% Cell type:code id: tags:
``` python
if low_edges is not None:
    do_3d_plot(hists_signal_low, low_edges, "Signal (ADU)", "Pulse id")
    do_2d_plot(hists_signal_low, low_edges, "Signal (ADU)", "Pulse id")
if high_edges is not None:
    do_3d_plot(hists_signal_high, high_edges, "Signal (ADU)", "Pulse id")
    do_2d_plot(hists_signal_high, high_edges, "Signal (ADU)", "Pulse id")
```
%% Cell type:code id: tags:
``` python
corrected = []
raw = []
gains = []
mask = []
cell_fac = 1
first_idx = 0
last_idx = cell_fac*176+first_idx
pulse_ids = []
train_ids = []
for i, ff in enumerate(first_files[:16]):
    try:
        rf, cf = ff
        #print(cf, i)
        if rf is None:
            raise Exception("File not present")
        #print(rf)
        infile = h5py.File(rf, "r")
        #print("/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data".format(instrument, i))
        raw.append(np.array(infile["/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data".format(instrument, i)][first_idx:last_idx,0,...]))
        infile.close()
        infile = h5py.File(cf, "r")
        #print("/INSTRUMENT/SPB_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data".format(i))
        corrected.append(np.array(infile["/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/data".format(instrument, i)][first_idx:last_idx,...]))
        gains.append(np.array(infile["/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/gain".format(instrument, i)][first_idx:last_idx,...]))
        mask.append(np.array(infile["/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/mask".format(instrument, i)][first_idx:last_idx,...]))
        pulse_ids.append(np.squeeze(infile["/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/pulseId".format(instrument, i)][first_idx:last_idx,...]))
        train_ids.append(np.squeeze(infile["/INSTRUMENT/{}_DET_AGIPD1M-1/DET/{}CH0:xtdf/image/trainId".format(instrument, i)][first_idx:last_idx,...]))
        infile.close()
    except Exception as e:
        print(e)
        corrected.append(np.zeros((last_idx-first_idx, 512, 128)))
        gains.append(np.zeros((last_idx-first_idx, 512, 128)))
        mask.append(np.zeros((last_idx-first_idx, 512, 128)))
        raw.append(np.zeros((last_idx-first_idx, 512, 128)))
```
%% Cell type:code id: tags:
``` python
domask = False
if domask:
    for i, c in enumerate(corrected):
        c[mask[i] != 0] = 0
        #c[pats[i] < 200] = 0
        #c[np.abs(pats[i]) == 1000] = np.abs(c[np.abs(pats[i]) == 1000])
combined = combine_stack(corrected, last_idx-first_idx)
combined_raw = combine_stack(raw, last_idx-first_idx)
combined_g = combine_stack(gains, last_idx-first_idx)
combined_mask = combine_stack(mask, last_idx-first_idx)
```
%% Cell type:markdown id: tags:
### Mean RAW Preview ###
The per pixel mean of the first 128 images of the RAW data
%% Cell type:code id: tags:
``` python
%matplotlib inline
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.mean(combined_raw[:,:1300,400:1600],axis=0),
vmin=min(0.75*np.median(combined_raw[combined_raw > 0]), 4000),
vmax=max(1.5*np.median(combined_raw[combined_raw > 0]), 7000), cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
### Single Shot Preview ###
A single shot image from cell 12 of the first train
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
dim = combined[1,:1300,400:1600]
im = ax.imshow(dim, vmin=-0, vmax=max(20*np.median(dim[dim > 0]), 100), cmap="jet", interpolation="nearest")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
h = ax.hist(dim.flatten(), bins=1000, range=(0, 2000))
```
%% Cell type:markdown id: tags:
### Mean CORRECTED Preview ###
The per pixel mean of the first 128 images of the CORRECTED data
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.mean(combined[:,:1300,400:1600], axis=0), vmin=-50,
vmax=max(100*np.median(combined[combined > 0]), 100), cmap="jet", interpolation="nearest")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
combined[combined <= 0] = 0
h = ax.hist(combined.flatten(), bins=1000, range=(-50, 1000), log=True)
```
%% Cell type:code id: tags:
``` python
#np.save('/gpfs/exfel/data/scratch/haufs/agipd_hist/prop_off_pcor_splits.npy', h)
```
%% Cell type:markdown id: tags:
### Maximum GAIN Preview ###
The per pixel maximum of the first 128 images of the digitized GAIN data
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.max(combined_g[1,:1300,400:1600][None,...], axis=0), vmin=0,
vmax=3, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
## Bad Pixels ##
The mask contains dedicated entries for all pixels and memory cells as well as all three gain stages. Each mask entry is encoded in 32 bits as:
%% Cell type:code id: tags:
``` python
from cal_tools.enums import BadPixels
from IPython.display import HTML, display, Markdown, Latex
import tabulate
table = []
for item in BadPixels:
    table.append((item.name, "{:016b}".format(item.value)))
md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=["Bad pixel type", "Bit mask"])))
```
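To make the encoding concrete, the sketch below (with a made-up mask entry) shows how a single 32-bit value is decoded: each `BadPixels` member occupies one bit, so a bitwise AND tells whether that defect flagged the pixel; this is the same pattern used for the NOISY_ADC plot further down:

``` python
# Hypothetical mask entry combining two defects.
entry = np.uint32(BadPixels.NOISY_ADC.value | BadPixels.NO_DARK_DATA.value)
for item in BadPixels:
    if entry & np.uint32(item.value):
        print("bit set:", item.name)
```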
%% Cell type:markdown id: tags:
### Single Shot Bad Pixels ###
A single shot bad pixel map from cell 4 of the first train
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.log2(combined_mask[10,:1300,400:1600]), vmin=0,
vmax=32, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
### Full Train Bad Pixels ###
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.log2(np.max(combined_mask[:,:1300,400:1600], axis=0)), vmin=0,
vmax=32, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
### Full Train Bad Pixels - Only Dark Char. Related ###
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.max((combined_mask.astype(np.uint32)[:,:1300,400:1600] & BadPixels.NOISY_ADC.value) != 0, axis=0), vmin=0,
vmax=1, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:code id: tags:
``` python
from cal_tools.enums import BadPixels
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
cm = combined_mask[:,:1300,400:1600]
cm[cm > BadPixels.NO_DARK_DATA.value] = 0
im = ax.imshow(np.log2(np.max(cm, axis=0)), vmin=0,
vmax=32, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
```