
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: calibration/pycalibration
Commits on Source (6): 244 additions and 180 deletions
isort:
stage: test
script:
- python3 -m pip install --user isort==5.6.4
- isort --diff **/*.py && isort -c **/*.py
pytest:
stage: test
script:
- python3 -m pip install --user -r requirements.txt
- python3 -m pip install --user pytest
- pytest -vv tests/test_*
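For a quick local run of the same gate before pushing, isort's Python API can reproduce the check — a minimal sketch, assuming isort 5.x:

``` python
# Minimal local equivalent of the CI isort gate (assumes isort>=5)
import isort

snippet = "import sys\nimport argparse\n"
print(isort.check_code(snippet))  # False: 'argparse' sorts before 'sys'
print(isort.code(snippet))        # the corrected import block
```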
import traceback
from pathlib import Path
from typing import Optional, Tuple
import h5py
import numpy as np
import sharedmem
import traceback
from cal_tools.agipdutils import *
from cal_tools.cython import agipdalgs as calgs
from cal_tools.enums import BadPixels, SnowResolution
from cal_tools.tools import get_constant_from_db_and_time
from iCalibrationDB import Constants, Conditions, Detectors
from iCalibrationDB import Conditions, Constants, Detectors
from cal_tools.cython import agipdalgs as calgs
def get_num_cells(fname, loc, module):
......@@ -242,7 +242,8 @@ class AgipdCorrections:
self.corr_bools = corr_bools
else:
bools = list(set(corr_bools) - set(tot_corr_bools))
raise Exception(f'Correction Booleans: {bools} are not available!') # noqa
raise Exception('Correction Booleans: '
f'{bools} are not available!')
# Flags allowing for pulse capacitor constant retrieval.
self.pc_bools = [self.corr_bools.get("rel_gain"),
......@@ -853,15 +854,15 @@ class AgipdCorrections:
# save INDEX contents (first, count) in CORR files
outfile.create_dataset(idx_base + "{}/first".format(do),
fidxv.shape,
dtype=fidxv.dtype,
data=fidxv,
fletcher32=True)
fidxv.shape,
dtype=fidxv.dtype,
data=fidxv,
fletcher32=True)
outfile.create_dataset(idx_base + "{}/count".format(do),
cntsv.shape,
dtype=cntsv.dtype,
data=cntsv,
fletcher32=True)
cntsv.shape,
dtype=cntsv.dtype,
data=cntsv,
fletcher32=True)
def retrieve_constant_and_time(self, const_dict, device,
cal_db_interface, creation_time):
......@@ -899,7 +900,7 @@ class AgipdCorrections:
print_once=0)
return cons_data, when
def init_constants(self, cons_data, module_idx):
def init_constants(self, cons_data, when, module_idx):
"""
For CI derived gain, a mean multiplication factor of 4.48 compared
to medium gain is used, as no reliable CI data for all memory cells
......@@ -938,6 +939,8 @@ class AgipdCorrections:
rel_low_gain = rel_medium_gain * 4.48
:param cons_data: A dictionary for each retrieved constant value.
:param when: A dictionary for the creation time
of each retrieved constant.
:param module_idx: Module index
:return:
"""
......@@ -954,114 +957,129 @@ class AgipdCorrections:
bpixels = cons_data["BadPixelsDark"].astype(np.uint32)
if self.corr_bools.get("xray_corr"):
bpixels |= cons_data["BadPixelsFF"].astype(np.uint32)[..., :bpixels.shape[2], None] # noqa
slopesFF = cons_data["SlopesFF"]
# This could be used for backward compatibility
# for very old SlopesFF constants
if len(slopesFF.shape) == 4:
slopesFF = slopesFF[..., 0]
# This is for backward compatibility for old FF constants
# (128, 512, mem_cells)
if slopesFF.shape[-1] == 2:
xray_cor = np.squeeze(slopesFF[...,0])
xray_cor_med = np.nanmedian(xray_cor)
xray_cor[np.isnan(xray_cor)]= xray_cor_med
xray_cor[(xray_cor<0.8) | (xray_cor>1.2)] = xray_cor_med
xray_cor = np.dstack([xray_cor]*self.max_cells)
if when["BadPixelsFF"]:
bpixels |= cons_data["BadPixelsFF"].astype(np.uint32)[...,
:bpixels.shape[2], # noqa
None]
if when["SlopesFF"]: # Checking if constant was retrieved
slopesFF = cons_data["SlopesFF"]
# This could be used for backward compatibility
# for very old SlopesFF constants
if len(slopesFF.shape) == 4:
slopesFF = slopesFF[..., 0]
# This is for backward compatibility for old FF constants
# (128, 512, mem_cells)
if slopesFF.shape[-1] == 2:
xray_cor = np.squeeze(slopesFF[...,0])
xray_cor_med = np.nanmedian(xray_cor)
xray_cor[np.isnan(xray_cor)]= xray_cor_med
xray_cor[(xray_cor<0.8) | (xray_cor>1.2)] = xray_cor_med
xray_cor = np.dstack([xray_cor]*self.max_cells)
else:
# Memory cell resolved xray_cor correction
xray_cor = slopesFF # (128, 512, mem_cells)
if xray_cor.shape[-1] < self.max_cells:
# In case of having new constant with less memory cells,
# due to lack of enough FF data or during development.
# xray_cor should be expanded by last memory cell.
xray_cor = np.dstack(xray_cor,
np.dstack([xray_cor[..., -1]]
* (self.max_cells - xray_cor.shape[-1]))) # noqa
# This is already done for old constants,
# but new constant is absolute and we need to have
# global ADU output for the moment
xray_cor /= self.ff_gain
else:
# Memory cell resolved xray_cor correction
xray_cor = slopesFF # (128, 512, mem_cells)
if xray_cor.shape[-1] < self.max_cells:
# In case of having new constant with less memory cells,
# due to lack of enough FF data or during development.
# xray_cor should be expanded by last memory cell.
xray_cor = np.dstack(xray_cor,
np.dstack([xray_cor[..., -1]]
* (self.max_cells - xray_cor.shape[-1]))) # noqa
# This is already done for old constants,
# but new constant is absolute and we need to have
# global ADU output for the moment
xray_cor /= self.ff_gain
xray_cor = np.ones((128, 512, self.max_cells), np.float32)
self.xray_cor[module_idx][...] = xray_cor.transpose()[...]
# add additional bad pixel information
if any(self.pc_bools):
bppc = np.moveaxis(cons_data["BadPixelsPC"].astype(np.uint32),
0, 2)
bpixels |= bppc[..., :bpixels.shape[2], None]
slopesPC = cons_data["SlopesPC"].astype(np.float32)
# This will handle some historical data in a different format
# constant dimension injected first
if slopesPC.shape[0] == 10 or slopesPC.shape[0] == 11:
slopesPC = np.moveaxis(slopesPC, 0, 3)
slopesPC = np.moveaxis(slopesPC, 0, 2)
if when["BadPixelsPC"]:
bppc = np.moveaxis(cons_data["BadPixelsPC"].astype(np.uint32),
0, 2)
bpixels |= bppc[..., :bpixels.shape[2], None]
# calculate relative gain from the constants
rel_gain = np.ones((128, 512, self.max_cells, 3), np.float32)
# high gain slope from pulse capacitor data
pc_high_m = slopesPC[..., :self.max_cells, 0]
pc_high_l = slopesPC[..., :self.max_cells, 1]
# medium gain slope from pulse capacitor data
pc_med_m = slopesPC[..., :self.max_cells, 3]
pc_med_l = slopesPC[..., :self.max_cells, 4]
# calculate median for slopes
pc_high_med = np.nanmedian(pc_high_m, axis=(0,1))
pc_med_med = np.nanmedian(pc_med_m, axis=(0,1))
# calculate median for intercepts:
pc_high_l_med = np.nanmedian(pc_high_l, axis=(0,1))
pc_med_l_med = np.nanmedian(pc_med_l, axis=(0,1))
# sanitize PC data
# (it should be done already on the level of constants)
# In the following loop,
# replace `nan`s across memory cells with
# the median value calculated previously.
# Then, values outside of the valid range (0.8 and 1.2)
# are fixed to the median value.
# This is applied for high and medium gain stages
for i in range(self.max_cells):
pc_high_m[np.isnan(pc_high_m[..., i])] = pc_high_med[i]
pc_med_m[np.isnan(pc_med_m[..., i])] = pc_med_med[i]
pc_high_l[np.isnan(pc_high_l[..., i])] = pc_high_l_med[i]
pc_med_l[np.isnan(pc_med_l[..., i])] = pc_med_l_med[i]
pc_high_m[(pc_high_m[..., i] < 0.8 * pc_high_med[i]) |
(pc_high_m[..., i] > 1.2 * pc_high_med[i])] = pc_high_med[i] # noqa
pc_med_m[(pc_med_m[..., i] < 0.8 * pc_med_med[i]) |
(pc_med_m[..., i] > 1.2 * pc_med_med[i])] = pc_med_med[i] # noqa
pc_high_l[(pc_high_l[..., i] < 0.8 * pc_high_l_med[i]) |
(pc_high_l[..., i] > 1.2 * pc_high_l_med[i])] = pc_high_l_med[i] # noqa
pc_med_l[(pc_med_l[..., i] < 0.8 * pc_med_l_med[i]) |
(pc_med_l[..., i] > 1.2 * pc_med_l_med[i])] = pc_med_l_med[i] # noqa
# ratio between HG and MG per pixel per mem cell used
# for rel gain calculation
frac_high_med_pix = pc_high_m / pc_med_m
# average ratio between HG and MG as a function of
# mem cell (needed for bls_stripes)
# TODO: Per pixel would be more optimal correction
frac_high_med = pc_high_med / pc_med_med
# calculate additional medium-gain offset
md_additional_offset = pc_high_l - pc_med_l * pc_high_m / pc_med_m
# Calculate relative gain. If FF constants are available,
# use them for high gain
# if not rel_gain is calculated using PC data only
# if self.corr_bools.get("xray_corr"):
# rel_gain[..., :self.max_cells, 0] /= xray_corr
# PC data should be calibrated with X-ray data;
# if that is not done, it is better to use 1 than to bias
# the results with PC artefacts.
# rel_gain[..., 0] = 1./(pc_high_m / pc_high_ave)
rel_gain[..., 1] = rel_gain[..., 0] * frac_high_med_pix
rel_gain[..., 2] = rel_gain[..., 1] * 4.48
if when["SlopesPC"]:
slopesPC = cons_data["SlopesPC"].astype(np.float32)
# This will handle some historical data in a different format
# constant dimension injected first
if slopesPC.shape[0] == 10 or slopesPC.shape[0] == 11:
slopesPC = np.moveaxis(slopesPC, 0, 3)
slopesPC = np.moveaxis(slopesPC, 0, 2)
# high gain slope from pulse capacitor data
pc_high_m = slopesPC[..., :self.max_cells, 0]
pc_high_l = slopesPC[..., :self.max_cells, 1]
# medium gain slope from pulse capacitor data
pc_med_m = slopesPC[..., :self.max_cells, 3]
pc_med_l = slopesPC[..., :self.max_cells, 4]
# calculate median for slopes
pc_high_med = np.nanmedian(pc_high_m, axis=(0,1))
pc_med_med = np.nanmedian(pc_med_m, axis=(0,1))
# calculate median for intercepts:
pc_high_l_med = np.nanmedian(pc_high_l, axis=(0,1))
pc_med_l_med = np.nanmedian(pc_med_l, axis=(0,1))
# sanitize PC data
# (it should be done already on the level of constants)
# In the following loop,
# replace `nan`s across memory cells with
# the median value calculated previously.
# Then, values outside of the valid range (0.8 and 1.2)
# are fixed to the median value.
# This is applied for high and medium gain stages
for i in range(self.max_cells):
pc_high_m[np.isnan(pc_high_m[..., i])] = pc_high_med[i]
pc_med_m[np.isnan(pc_med_m[..., i])] = pc_med_med[i]
pc_high_l[np.isnan(pc_high_l[..., i])] = pc_high_l_med[i]
pc_med_l[np.isnan(pc_med_l[..., i])] = pc_med_l_med[i]
pc_high_m[(pc_high_m[..., i] < 0.8 * pc_high_med[i]) |
(pc_high_m[..., i] > 1.2 * pc_high_med[i])] = pc_high_med[i] # noqa
pc_med_m[(pc_med_m[..., i] < 0.8 * pc_med_med[i]) |
(pc_med_m[..., i] > 1.2 * pc_med_med[i])] = pc_med_med[i] # noqa
pc_high_l[(pc_high_l[..., i] < 0.8 * pc_high_l_med[i]) |
(pc_high_l[..., i] > 1.2 * pc_high_l_med[i])] = pc_high_l_med[i] # noqa
pc_med_l[(pc_med_l[..., i] < 0.8 * pc_med_l_med[i]) |
(pc_med_l[..., i] > 1.2 * pc_med_l_med[i])] = pc_med_l_med[i] # noqa
# ratio between HG and MG per pixel per mem cell used
# for rel gain calculation
frac_high_med_pix = pc_high_m / pc_med_m
# average ratio between HG and MG as a function of
# mem cell (needed for bls_stripes)
# TODO: Per pixel would be more optimal correction
frac_high_med = pc_high_med / pc_med_med
# calculate additional medium-gain offset
md_additional_offset = pc_high_l - pc_med_l * pc_high_m / pc_med_m # noqa
# Calculate relative gain. If FF constants are available,
# use them for high gain
# if not rel_gain is calculated using PC data only
# if self.corr_bools.get("xray_corr"):
# rel_gain[..., :self.max_cells, 0] /= xray_corr
# PC data should be calibrated with X-ray data;
# if that is not done, it is better to use 1 than to bias
# the results with PC artefacts.
# rel_gain[..., 0] = 1./(pc_high_m / pc_high_ave)
rel_gain[..., 1] = rel_gain[..., 0] * frac_high_med_pix
rel_gain[..., 2] = rel_gain[..., 1] * 4.48
else:
# Initialize with placeholder parameters of ones
md_additional_offset = rel_gain
frac_high_med = np.ones((self.max_cells,), np.float32)
self.md_additional_offset[module_idx][...] = md_additional_offset.transpose()[...] # noqa
self.rel_gain[module_idx][...] = rel_gain[...].transpose()
......@@ -1099,7 +1117,8 @@ class AgipdCorrections:
else:
# Create empty constant using the list elements
cons_data[cname] = getattr(np, mdata["file-path"][0])(mdata["file-path"][1]) # noqa
self.init_constants(cons_data, module_idx)
self.init_constants(cons_data, when, module_idx)
return when
......@@ -1174,7 +1193,7 @@ class AgipdCorrections:
cal_db_interface,
creation_time)
self.init_constants(cons_data, module_idx)
self.init_constants(cons_data, when, module_idx)
return when
......@@ -1182,7 +1201,7 @@ class AgipdCorrections:
"""
Allocate memory for correction constants
:param modules: Module indises
:param modules: Module indices
:param constant_shape: Shape of expected constants (gain, cells, x, y)
"""
for module_idx in modules:
......
import copy
import numpy as np
from scipy.signal import cwt, ricker, find_peaks_cwt
from cal_tools.enums import BadPixels, SnowResolution
from scipy.signal import cwt, find_peaks_cwt, ricker
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import StandardScaler
from cal_tools.enums import BadPixels, SnowResolution
def assemble_constant_dict(corr_bools, pc_bools, memory_cells, bias_voltage,
gain_setting, acquisition_rate,
......
from typing import Any, Dict, List, Optional, Tuple
from iminuit import Minuit
import numpy as np
from cal_tools.enums import BadPixelsFF
from iminuit import Minuit
def any_in(mask: np.ndarray, bits: int) -> bool:
......
from enum import Enum
import datetime
import glob
from enum import Enum
import dateutil.parser
import h5py
......
......@@ -4,7 +4,7 @@ import h5py
import numpy as np
from cal_tools.enums import BadPixels
from cal_tools.tools import get_constant_from_db, get_constant_from_db_and_time
from iCalibrationDB import Constants, Conditions, Detectors
from iCalibrationDB import Conditions, Constants, Detectors
class LpdCorrections:
......
import numpy as np
import h5py, os, re
import os
import re
import h5py
import numpy as np
from matplotlib import pylab as plt
......
......@@ -3,13 +3,12 @@ from typing import Any, Dict, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
from matplotlib.patches import Patch
from mpl_toolkits.axes_grid1 import AxesGrid
from extra_geom import (AGIPD_1MGeometry, AGIPD_500K2GGeometry,
DSSC_1MGeometry, LPD_1MGeometry)
from extra_geom import tests as eg_tests
from matplotlib import colors
from matplotlib.patches import Patch
from mpl_toolkits.axes_grid1 import AxesGrid
def show_overview(d, cell_to_preview, gain_to_preview, out_folder=None, infix=None):
......
......@@ -3,6 +3,7 @@ from time import perf_counter
import numpy as np
class StepTimer:
def __init__(self):
self.steps = defaultdict(list)
......
from collections import OrderedDict
import datetime
from glob import glob
import json
import re
from collections import OrderedDict
from glob import glob
from os import environ, listdir, path
from os.path import isfile
from pathlib import Path
from queue import Queue
import re
from time import sleep
from typing import Optional
from urllib.parse import urljoin
......@@ -14,15 +14,16 @@ from urllib.parse import urljoin
import dateutil.parser
import h5py
import ipykernel
from metadata_client.metadata_client import MetadataClient
from notebook.notebookapp import list_running_servers
import numpy as np
import requests
import zmq
from iCalibrationDB import ConstantMetaData, Versions
from metadata_client.metadata_client import MetadataClient
from notebook.notebookapp import list_running_servers
from .ana_tools import save_dict_to_hdf5
from iCalibrationDB import ConstantMetaData, Versions
from .mdc_config import MDC_config
import zmq
def parse_runs(runs, return_type=str):
pruns = []
......
import numpy as np
cimport numpy as cnp
cimport cython
cimport numpy as cnp
@cython.boundscheck(False)
@cython.wraparound(False)
......
......@@ -40,11 +40,10 @@ extensions = [
]
import glob
import sys
import os
import sys
from subprocess import Popen
sys.path.append(os.path.abspath("../pycalibration/"))
p = Popen(["./makeAllDocs.sh"])
p.communicate()
......@@ -375,13 +374,16 @@ except:
check_call(["wget", pandoc_url])
check_call(["dpkg", "-i", pandoc_pack])
# generate the list of available notebooks
from xfel_calibrate import notebooks
import os
from subprocess import check_output
from textwrap import dedent, indent
from nbconvert import RSTExporter
import os
import nbformat
from nbconvert import RSTExporter
# generate the list of available notebooks
from xfel_calibrate import notebooks
rst_exporter = RSTExporter()
with open("available_notebooks.rst", "w") as f:
......@@ -440,14 +442,15 @@ with open("available_notebooks.rst", "w") as f:
# add test results
test_artefact_dir = os.path.realpath("../../tests/artefacts")
from datetime import datetime
from dateutil.parser import parse
from lxml import etree
import shutil
import tabulate
import textwrap
from datetime import datetime
from uuid import uuid4
import tabulate
from dateutil.parser import parse
from lxml import etree
def xml_to_rst_report(xml, git_tag, reports=[]):
e = etree.fromstring(xml.encode())
......
%% Cell type:markdown id: tags:
# AGIPD Offline Correction #
Author: European XFEL Detector Group, Version: 2.0
Offline Calibration for the AGIPD Detector
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/HED/202031/p900174/raw" # the folder to read data from, required
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/hibef_agipd2" # the folder to output to, required
sequences = [-1] # sequences to correct, set to -1 for all, range allowed
modules = [-1] # modules to correct, set to -1 for all, range allowed
run = 155 # runs to process, required
karabo_id = "HED_DET_AGIPD500K2G" # karabo id
karabo_da = ['-1'] # a list of data aggregator names; default ['-1'] selects all data aggregators
receiver_id = "{}CH0" # inset for receiver devices
path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data
h5path = 'INSTRUMENT/{}/DET/{}:xtdf/' # path in the HDF5 file to images
h5path_idx = 'INDEX/{}/DET/{}:xtdf/' # path in the HDF5 file to the image index
h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP' # path to control information
karabo_id_control = "HED_EXP_AGIPD500K2G" # karabo-id for control device
karabo_da_control = 'AGIPD500K2G00' # karabo DA for control information
slopes_ff_from_files = "" # Path to locally stored SlopesFF and BadPixelsFF constants
use_dir_creation_date = True # use the creation date of the input dir for database queries
cal_db_interface = "tcp://max-exfl016:8015#8045" # the database interface to use
cal_db_timeout = 30000 # in milliseconds
creation_date_offset = "00:00:00" # add an offset to creation date, e.g. to get different constants
max_cells = 0 # number of memory cells used, set to 0 to automatically infer
bias_voltage = 300 # Bias voltage
acq_rate = 0. # the detector acquisition rate, use 0 to try to auto-determine
gain_setting = 0.1 # the gain setting, use 0.1 to try to auto-determine
photon_energy = 9.2 # photon energy in keV
overwrite = True # set to True if existing data should be overwritten
max_pulses = [0, 500, 1] # range list [start, end, step] of maximum pulse indices within a train; at most 3 list elements are allowed
mem_cells_db = 0 # set to a value different than 0 to use this value for DB queries
cell_id_preview = 1 # cell Id used for preview in single-shot plots
# Correction parameters
blc_noise_threshold = 5000 # above this mean signal intensity no baseline correction via noise is attempted
cm_dark_fraction = 0.66 # threshold on the fraction of empty pixels for a module to be considered dark enough to perform CM correction
cm_dark_range = [-50.,30] # signal range in ADU for a pixel to be considered a dark pixel
cm_n_itr = 4 # number of iterations for common mode correction
hg_hard_threshold = 1000 # threshold to force medium gain offset subtracted pixel to high gain
mg_hard_threshold = 1000 # threshold to force medium gain offset subtracted pixel from low to medium gain
noisy_adc_threshold = 0.25 # threshold to mask a complete ADC
ff_gain = 7.2 # conversion gain for absolute FlatField constants, while applying xray_gain
# Correction Booleans
only_offset = False # apply only the offset correction; if False, offset is applied by default alongside the other enabled corrections; if True, only offset is applied
rel_gain = False # do relative gain correction based on PC data
xray_gain = False # do relative gain correction based on xray data
blc_noise = False # if set, baseline correction via noise peak location is attempted
blc_stripes = False # if set, baseline correction via stripes is attempted
blc_hmatch = False # if set, baseline correction via histogram matching is attempted
match_asics = False # if set, inner ASIC borders are matched to the same signal level
adjust_mg_baseline = False # adjust medium gain baseline to match highest high gain value
zero_nans = False # set NaN values in corrected data to 0
zero_orange = False # set to 0 very negative and very large values in corrected data
blc_set_min = False # Shift to 0 negative medium gain pixels after offset corr
corr_asic_diag = False # if set, diagonal drop-offs on ASICs are corrected
force_hg_if_below = False # set high gain if mg offset subtracted value is below hg_hard_threshold
force_mg_if_below = False # set medium gain if mg offset subtracted value is below mg_hard_threshold
mask_noisy_adc = False # mask an entire ADC if it is noisy above a relative threshold
common_mode = False # Common mode correction
melt_snow = False # Identify (and optionally interpolate) 'snowy' pixels
mask_zero_std = False # Mask pixels with zero standard deviation across train
low_medium_gap = False # 5 sigma separation in thresholding between low and medium gain
# Parallelization parameters
chunk_size = 1000 # size of chunk for image-wise correction
chunk_size_idim = 1 # chunking size of imaging dimension, adjust if user software is sensitive to this.
n_cores_correct = 16 # Number of chunks to be processed in parallel
n_cores_files = 4 # Number of files to be processed in parallel
sequences_per_node = 2 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel
def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):
from xfel_calibrate.calibrate import balance_sequences as bs
return bs(in_folder, run, sequences, sequences_per_node, karabo_da)
```
%% Cell type:code id: tags:
``` python
import copy
from datetime import timedelta
from dateutil import parser
import gc
import glob
import itertools
from IPython.display import HTML, display, Markdown, Latex
import math
from multiprocessing import Pool
import os
import re
import sys
import traceback
from time import time, sleep, perf_counter
import tabulate
import warnings
warnings.filterwarnings('ignore')
import yaml
from extra_geom import AGIPD_1MGeometry, AGIPD_500K2GGeometry
from extra_data import RunDirectory, stack_detector_data
from iCalibrationDB import Detectors
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.colors import LogNorm
from matplotlib import cm as colormap
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("agg")
%matplotlib inline
import numpy as np
import seaborn as sns
sns.set()
sns.set_context("paper", font_scale=1.4)
sns.set_style("ticks")
from cal_tools.agipdlib import (AgipdCorrections, get_acq_rate,
get_gain_setting, get_num_cells)
from cal_tools.cython import agipdalgs as calgs
from cal_tools.ana_tools import get_range
from cal_tools.enums import BadPixels
from cal_tools.tools import get_dir_creation_date, map_modules_from_folder
from cal_tools.step_timing import StepTimer
import seaborn as sns
sns.set()
sns.set_context("paper", font_scale=1.4)
sns.set_style("ticks")
```
%% Cell type:markdown id: tags:
## Evaluated parameters ##
%% Cell type:code id: tags:
``` python
# Fill dictionaries comprising bools and arguments for correction and data analysis
# Here the hierarchy and dependencies of the correction booleans are defined
corr_bools = {}
# offset is at the bottom of AGIPD correction pyramid.
corr_bools["only_offset"] = only_offset
# Don't apply any other corrections if only_offset is requested
if not only_offset:
corr_bools["adjust_mg_baseline"] = adjust_mg_baseline
corr_bools["rel_gain"] = rel_gain
corr_bools["xray_corr"] = xray_gain
corr_bools["blc_noise"] = blc_noise
corr_bools["blc_stripes"] = blc_stripes
corr_bools["blc_hmatch"] = blc_hmatch
corr_bools["blc_set_min"] = blc_set_min
corr_bools["match_asics"] = match_asics
corr_bools["corr_asic_diag"] = corr_asic_diag
corr_bools["zero_nans"] = zero_nans
corr_bools["zero_orange"] = zero_orange
corr_bools["mask_noisy_adc"] = mask_noisy_adc
corr_bools["force_hg_if_below"] = force_hg_if_below
corr_bools["force_mg_if_below"] = force_mg_if_below
corr_bools["common_mode"] = common_mode
corr_bools["melt_snow"] = melt_snow
corr_bools["mask_zero_std"] = mask_zero_std
corr_bools["low_medium_gap"] = low_medium_gap
```
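%% Cell type:markdown id: tags:
For orientation, a tiny sketch of this pyramid: `only_offset` suppresses every other entry. The helper below is hypothetical, mirroring the logic of the cell above:
%% Cell type:code id: tags:
``` python
# Hypothetical helper illustrating the correction-boolean hierarchy
def build_corr_bools(only_offset, **others):
    bools = {"only_offset": only_offset}
    if not only_offset:
        bools.update(others)
    return bools

assert build_corr_bools(True, rel_gain=True) == {"only_offset": True}
```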
%% Cell type:code id: tags:
``` python
if in_folder[-1] == "/":
in_folder = in_folder[:-1]
if sequences[0] == -1:
sequences = None
control_fname = f'{in_folder}/r{run:04d}/RAW-R{run:04d}-{karabo_da_control}-S00000.h5'
h5path_ctrl = h5path_ctrl.format(karabo_id_control)
h5path = h5path.format(karabo_id, receiver_id)
h5path_idx = h5path_idx.format(karabo_id, receiver_id)
print(f'Path to control file {control_fname}')
```
%% Cell type:code id: tags:
``` python
# Create output folder
os.makedirs(out_folder, exist_ok=overwrite)
# Evaluate detector instance for mapping
instrument = karabo_id.split("_")[0]
if instrument == "SPB":
dinstance = "AGIPD1M1"
nmods = 16
elif instrument == "MID":
dinstance = "AGIPD1M2"
nmods = 16
elif instrument == "HED":
dinstance = "AGIPD500K"
nmods = 8
# Evaluate requested modules
if karabo_da[0] == '-1':
if modules[0] == -1:
modules = list(range(nmods))
karabo_da = ["AGIPD{:02d}".format(i) for i in modules]
else:
modules = [int(x[-2:]) for x in karabo_da]
def mod_name(modno):
return f"Q{modno // 4 + 1}M{modno % 4 + 1}"
print("Process modules: ", ', '.join(
[mod_name(x) for x in modules]))
print(f"Detector in use is {karabo_id}")
print(f"Instrument {instrument}")
print(f"Detector instance {dinstance}")
```
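%% Cell type:markdown id: tags:
`mod_name` maps a linear module index to the quadrant/module scheme; a quick illustrative check:
%% Cell type:code id: tags:
``` python
# module 5 sits in quadrant 2, position 2
assert mod_name(5) == "Q2M2"
```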
%% Cell type:code id: tags:
``` python
# Display Information about the selected pulses indices for correction.
pulses_lst = list(range(*max_pulses)) if not (len(max_pulses)==1 and max_pulses[0]==0) else max_pulses
try:
if len(pulses_lst) > 1:
print("A range of {} pulse indices is selected: from {} to {} with a step of {}"
.format(len(pulses_lst), pulses_lst[0] , pulses_lst[-1] + (pulses_lst[1] - pulses_lst[0]),
pulses_lst[1] - pulses_lst[0]))
else:
print("one pulse is selected: a pulse of idx {}".format(pulses_lst[0]))
except Exception as e:
raise ValueError('max_pulses input Error: {}'.format(e))
```
%% Cell type:code id: tags:
``` python
# set everything up filewise
mmf = map_modules_from_folder(in_folder, run, path_template,
karabo_da, sequences)
mapped_files, mod_ids, total_sequences, sequences_qm, _ = mmf
file_list = []
# ToDo: Split table over pages
print(f"Processing a total of {total_sequences} sequence files in chunks of {n_cores_files}")
table = []
ti = 0
for k, files in mapped_files.items():
i = 0
for f in list(files.queue):
file_list.append(f)
if i == 0:
table.append((ti, k, i, f))
else:
table.append((ti, "", i, f))
i += 1
ti += 1
md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
headers=["#", "module", "# module", "file"])))
file_list = sorted(file_list, key=lambda name: name[-10:])
```
%% Cell type:code id: tags:
``` python
filename = file_list[0]
channel = int(re.findall(r".*-AGIPD([0-9]+)-.*", filename)[0])
# Evaluate number of memory cells
mem_cells = get_num_cells(filename, karabo_id, channel)
if mem_cells is None:
raise ValueError(f"No raw images found in {filename}")
mem_cells_db = mem_cells if mem_cells_db == 0 else mem_cells_db
max_cells = mem_cells if max_cells == 0 else max_cells
# Evaluate acquisition rate
if acq_rate == 0:
acq_rate = get_acq_rate((filename, karabo_id, channel))
print(f"Maximum memory cells to calibrate: {max_cells}")
```
%% Cell type:code id: tags:
``` python
# Evaluate creation time
creation_time = None
if use_dir_creation_date:
creation_time = get_dir_creation_date(in_folder, run)
offset = parser.parse(creation_date_offset)
delta = timedelta(hours=offset.hour,
minutes=offset.minute, seconds=offset.second)
creation_time += delta
# Evaluate gain setting
if gain_setting == 0.1:
if creation_time.replace(tzinfo=None) < parser.parse('2020-01-31'):
print("Set gain-setting to None for runs taken before 2020-01-31")
gain_setting = None
else:
try:
gain_setting = get_gain_setting(control_fname, h5path_ctrl)
except Exception as e:
print(f'ERROR: while reading gain setting from: \n{control_fname}')
print(e)
print("Set gain setting to 0")
gain_setting = 0
```
%% Cell type:code id: tags:
``` python
print(f"Using {creation_time} as creation time")
print(f"Operating conditions are:\n• Bias voltage: {bias_voltage}\n• Memory cells: {mem_cells_db}\n"
f"• Acquisition rate: {acq_rate}\n• Gain setting: {gain_setting}\n• Photon Energy: {photon_energy}\n")
```
%% Cell type:markdown id: tags:
## Data processing ##
%% Cell type:code id: tags:
``` python
agipd_corr = AgipdCorrections(max_cells, max_pulses,
h5_data_path=h5path,
h5_index_path=h5path_idx,
corr_bools=corr_bools)
agipd_corr.baseline_corr_noise_threshold = -blc_noise_threshold
agipd_corr.hg_hard_threshold = hg_hard_threshold
agipd_corr.mg_hard_threshold = mg_hard_threshold
agipd_corr.cm_dark_min = cm_dark_range[0]
agipd_corr.cm_dark_max = cm_dark_range[1]
agipd_corr.cm_dark_fraction = cm_dark_fraction
agipd_corr.cm_n_itr = cm_n_itr
agipd_corr.noisy_adc_threshold = noisy_adc_threshold
agipd_corr.ff_gain = ff_gain
```
%% Cell type:code id: tags:
``` python
# Retrieve calibration constants to RAM
agipd_corr.allocate_constants(modules, (3, mem_cells_db, 512, 128))
const_yaml = None
if os.path.isfile(f'{out_folder}/retrieved_constants.yml'):
with open(f'{out_folder}/retrieved_constants.yml', "r") as f:
const_yaml = yaml.safe_load(f.read())
# retrieve constants
def retrieve_constants(mod):
"""
Retrieve calibration constants and load them to shared memory
Metadata for constants is taken from yml file or retrieved from the DB
"""
device = getattr(getattr(Detectors, dinstance), mod_name(mod))
err = ''
try:
# check if there is a yaml file in out_folder that has the device constants.
if const_yaml and device.device_name in const_yaml:
when = agipd_corr.initialize_from_yaml(const_yaml, mod, device)
else:
when = agipd_corr.initialize_from_db(cal_db_interface, creation_time, mem_cells_db, bias_voltage,
photon_energy, gain_setting, acq_rate, mod, device, False)
except Exception as e:
err = f"Error: {e}\nError traceback: {traceback.format_exc()}"
when = None
return err, mod, when, device.device_name
ts = perf_counter()
with Pool(processes=len(modules)) as pool:
const_out = pool.map(retrieve_constants, modules)
print(f"Constants were loaded in {perf_counter()-ts:.01f}s")
```
%% Cell type:code id: tags:
``` python
# allocate memory for images and hists
n_images_max = max_cells*256
data_shape = (n_images_max, 512, 128)
agipd_corr.allocate_images(data_shape, n_cores_files)
```
%% Cell type:code id: tags:
``` python
def batches(l, batch_size):
"""Group a list into batches of (up to) batch_size elements"""
start = 0
while start < len(l):
yield l[start:start + batch_size]
start += batch_size
```
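%% Cell type:markdown id: tags:
A quick illustrative check of `batches`; the last batch may be shorter:
%% Cell type:code id: tags:
``` python
assert list(batches(list(range(5)), 2)) == [[0, 1], [2, 3], [4]]
```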
%% Cell type:code id: tags:
``` python
def imagewise_chunks(img_counts):
"""Break up the loaded data into chunks of up to chunk_size
Yields (file data slot, start index, stop index)
"""
for i_proc, n_img in enumerate(img_counts):
n_chunks = math.ceil(n_img / chunk_size)
for i in range(n_chunks):
yield i_proc, i * n_img // n_chunks, (i+1) * n_img // n_chunks
```
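%% Cell type:markdown id: tags:
For example, with `chunk_size = 1000`, a file slot holding 2500 images is cut into three near-equal chunks (illustrative):
%% Cell type:code id: tags:
``` python
# math.ceil(2500 / 1000) == 3 chunks over file-data slot 0
assert list(imagewise_chunks([2500])) == [(0, 0, 833), (0, 833, 1666), (0, 1666, 2500)]
```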
%% Cell type:code id: tags:
``` python
step_timer = StepTimer()
```
%% Cell type:code id: tags:
``` python
with Pool() as pool:
for file_batch in batches(file_list, n_cores_files):
# TODO: Move some printed output to logging or similar
print(f"Processing next {len(file_batch)} files:")
for file_name in file_batch:
print(" ", file_name)
step_timer.start()
img_counts = pool.starmap(agipd_corr.read_file, enumerate(file_batch))
step_timer.done_step('Loading data from files')
# Evaluate zero-data-std mask
pool.starmap(agipd_corr.mask_zero_std, itertools.product(
range(len(file_batch)), np.array_split(np.arange(agipd_corr.max_cells), n_cores_correct)
))
step_timer.done_step('Mask 0 std')
# Perform offset image-wise correction
pool.starmap(agipd_corr.offset_correction, imagewise_chunks(img_counts))
step_timer.done_step("Offset correction")
if blc_noise or blc_stripes or blc_hmatch:
# Perform image-wise correction
pool.starmap(agipd_corr.baseline_correction, imagewise_chunks(img_counts))
step_timer.done_step("Base-line shift correction")
if common_mode:
# Perform cross-file correction parallel over asics
pool.starmap(agipd_corr.cm_correction, itertools.product(
range(len(file_batch)), range(16) # 16 ASICs per module
))
step_timer.done_step("Common-mode correction")
# Perform image-wise correction
pool.starmap(agipd_corr.gain_correction, imagewise_chunks(img_counts))
step_timer.done_step("Image-wise correction")
# Save corrected data
pool.starmap(agipd_corr.write_file, [
(i_proc, file_name, os.path.join(out_folder, os.path.basename(file_name).replace("RAW", "CORR")))
for i_proc, file_name in enumerate(file_batch)
])
step_timer.done_step("Save")
```
%% Cell type:code id: tags:
``` python
print(f"Correction of {len(file_list)} files is finished")
print(f"Total processing time {step_timer.timespan():.01f} s")
print(f"Timing summary per batch of {n_cores_files} files:")
step_timer.print_summary()
```
%% Cell type:code id: tags:
``` python
# if there is a yml file, a preceding notebook already retrieved the constants
# and the report will be generated from it.
fst_print = True
to_store = []
line = []
for i, (error, modno, when, mod_dev) in enumerate(const_out):
qm = mod_name(modno)
# expose errors while applying correction
if error:
print("Error: {}".format(error) )
if not const_yaml or mod_dev not in const_yaml:
if fst_print:
print("Constants are retrieved with creation time: ")
fst_print = False
line = [qm]
# If the correction did not crash
if not error:
print(f"{qm}:")
for key, item in when.items():
if hasattr(item, 'strftime'):
item = item.strftime('%y-%m-%d %H:%M')
when[key] = item
print('{:.<12s}'.format(key), item)
# Store a few timestamps if they exist
# Add NA to keep the array structure
for key in ['Offset', 'SlopesPC', 'SlopesFF']:
if when and key in when and when[key]:
line.append(when[key])
else:
if error is not None:
line.append('Err')
else:
line.append('NA')
if len(line) > 0:
to_store.append(line)
seq = sequences[0] if sequences else 0
if len(to_store) > 0:
with open(f"{out_folder}/retrieved_constants_s{seq}.yml","w") as fyml:
yaml.safe_dump({"time-summary": {f"S{seq}":to_store}}, fyml)
```
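%% Cell type:markdown id: tags:
For orientation, the dumped time summary has this shape (module name and timestamps are illustrative):
%% Cell type:code id: tags:
``` python
example = {"time-summary": {"S0": [["Q1M1", "21-01-25 19:17", "21-01-25 19:17", "NA"]]}}
print(yaml.safe_dump(example))
```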
%% Cell type:code id: tags:
``` python
def do_3d_plot(data, edges, x_axis, y_axis):
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
# Make data.
X = edges[0][:-1]
Y = edges[1][:-1]
X, Y = np.meshgrid(X, Y)
Z = data.T
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=colormap.coolwarm,
linewidth=0, antialiased=False)
ax.set_xlabel(x_axis)
ax.set_ylabel(y_axis)
ax.set_zlabel("Counts")
def do_2d_plot(data, edges, y_axis, x_axis):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
extent = [np.min(edges[1]), np.max(edges[1]),
np.min(edges[0]), np.max(edges[0])]
im = ax.imshow(data[::-1, :], extent=extent, aspect="auto",
norm=LogNorm(vmin=1, vmax=max(10, np.max(data))))
ax.set_xlabel(x_axis)
ax.set_ylabel(y_axis)
cb = fig.colorbar(im)
cb.set_label("Counts")
```
%% Cell type:code id: tags:
``` python
def get_trains_data(run_folder, source, include, tid=None, path='*/DET/*', modules=16, fillvalue=np.nan):
"""
Load a single train for all modules
def get_trains_data(run_folder, source, include, detector_id, tid=None, modules=16, fillvalue=np.nan):
"""Load single train for all module
:param run_folder: Path to folder with data
:param source: Data source to be loaded
:param include: Inset of file name to be considered
:param detector_id: The karabo id of the detector to get data for
:param tid: Train Id to be loaded. First train is considered if None is given
:param path: Path to find image data inside h5 file
"""
run_data = RunDirectory(run_folder, include)
if tid:
tid, data = run_data.select('*/DET/*', source).train_from_id(tid)
return tid, stack_detector_data(train=data, data=source, fillvalue=fillvalue, modules=modules)
if tid is not None:
tid, data = run_data.select(f'{detector_id}/DET/*', source).train_from_id(tid)
else:
for tid, data in run_data.select('*/DET/*', source).trains(require_all=True):
return tid, stack_detector_data(train=data, data=source, fillvalue=fillvalue, modules=modules)
return None, None
tid, data = next(iter(run_data.select(f'{detector_id}/DET/*', source).trains(require_all=True)))
return tid, stack_detector_data(train=data, data=source, fillvalue=fillvalue, modules=modules)
```
%% Cell type:code id: tags:
``` python
if dinstance == "AGIPD500K":
geom = AGIPD_500K2GGeometry.from_origin()
else:
geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
(-525, 625),
(-550, -10),
(520, -160),
(542.5, 475),
])
```
%% Cell type:code id: tags:
``` python
include = '*S00000*' if sequences is None else f'*S{sequences[0]:05d}*'
tid, corrected = get_trains_data(f'{out_folder}/', 'image.data', include, modules=nmods)
_, gains = get_trains_data(f'{out_folder}/', 'image.gain', include, tid, modules=nmods)
_, mask = get_trains_data(f'{out_folder}/', 'image.mask', include, tid, modules=nmods)
_, blshift = get_trains_data(f'{out_folder}/', 'image.blShift', include, tid, modules=nmods)
_, cellId = get_trains_data(f'{out_folder}/', 'image.cellId', include, tid, modules=nmods)
_, pulseId = get_trains_data(f'{out_folder}/', 'image.pulseId', include, tid,
modules=nmods, fillvalue=0)
_, raw = get_trains_data(f'{in_folder}/r{run:04d}/', 'image.data', include, tid, modules=nmods)
tid, corrected = get_trains_data(out_folder, 'image.data', include, karabo_id, modules=nmods)
_, gains = get_trains_data(out_folder, 'image.gain', include, tid, karabo_id, modules=nmods)
_, mask = get_trains_data(out_folder, 'image.mask', include, tid, karabo_id, modules=nmods)
_, blshift = get_trains_data(out_folder, 'image.blShift', include, tid, karabo_id, modules=nmods)
_, cellId = get_trains_data(out_folder, 'image.cellId', include, tid, karabo_id, modules=nmods)
_, pulseId = get_trains_data(out_folder, 'image.pulseId', include, tid, karabo_id, modules=nmods, fillvalue=0)
_, raw = get_trains_data(f'{in_folder}/r{run:04d}/', 'image.data', include, tid, karabo_id, modules=nmods)
```
%% Cell type:code id: tags:
``` python
display(Markdown(f'## Preview and statistics for {gains.shape[0]} images of the train {tid} ##\n'))
```
%% Cell type:markdown id: tags:
### Signal vs. Analogue Gain ###
%% Cell type:code id: tags:
``` python
hist, bins_x, bins_y = calgs.histogram2d(raw[:,0,...].flatten().astype(np.float32),
raw[:,1,...].flatten().astype(np.float32),
bins=(100, 100),
range=[[4000, 8192], [4000, 8192]])
do_2d_plot(hist, (bins_x, bins_y), "Signal (ADU)", "Analogue gain (ADU)")
do_3d_plot(hist, (bins_x, bins_y), "Signal (ADU)", "Analogue gain (ADU)")
```
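%% Cell type:markdown id: tags:
`calgs.histogram2d` is the Cython histogrammer from `cal_tools`; as a (slower) cross-check, plain `numpy` computes an equivalent histogram:
%% Cell type:code id: tags:
``` python
# pure-numpy cross-check of the 2D histogram above
hist_np, bx, by = np.histogram2d(raw[:, 0, ...].ravel().astype(np.float32),
                                 raw[:, 1, ...].ravel().astype(np.float32),
                                 bins=(100, 100), range=[[4000, 8192], [4000, 8192]])
```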
%% Cell type:markdown id: tags:
### Signal vs. Digitized Gain ###
The following plot shows signal vs. digitized gain.
%% Cell type:code id: tags:
``` python
hist, bins_x, bins_y = calgs.histogram2d(corrected.flatten().astype(np.float32),
gains.flatten().astype(np.float32), bins=(100, 3),
range=[[-50, 8192], [0, 3]])
do_2d_plot(hist, (bins_x, bins_y), "Signal (ADU)", "Gain bit value")
```
%% Cell type:code id: tags:
``` python
print(f"Gain statistics in %")
table = [[f'{gains[gains==0].size/gains.size*100:.02f}',
f'{gains[gains==1].size/gains.size*100:.03f}',
f'{gains[gains==2].size/gains.size*100:.03f}']]
md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
headers=["High", "Medium", "Low"])))
```
%% Cell type:markdown id: tags:
### Intensity per Pulse ###
%% Cell type:code id: tags:
``` python
pulse_range = [np.min(pulseId[pulseId>=0]), np.max(pulseId[pulseId>=0])]
mean_data = np.nanmean(corrected, axis=(2, 3))
hist, bins_x, bins_y = calgs.histogram2d(mean_data.flatten().astype(np.float32),
pulseId.flatten().astype(np.float32),
bins=(100, int(pulse_range[1])),
range=[[-50, 1000], pulse_range])
do_2d_plot(hist, (bins_x, bins_y), "Signal (ADU)", "Pulse id")
do_3d_plot(hist, (bins_x, bins_y), "Signal (ADU)", "Pulse id")
hist, bins_x, bins_y = calgs.histogram2d(mean_data.flatten().astype(np.float32),
pulseId.flatten().astype(np.float32),
bins=(100, int(pulse_range[1])),
range=[[-50, 200000], pulse_range])
do_2d_plot(hist, (bins_x, bins_y), "Signal (ADU)", "Pulse id")
do_3d_plot(hist, (bins_x, bins_y), "Signal (ADU)", "Pulse id")
```
%% Cell type:markdown id: tags:
### Baseline shift ###
Estimated baseline shift with respect to the total ADU counts of the corrected image.
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
h = ax.hist(blshift.flatten(), bins=100, log=True)
_ = plt.xlabel('Baseline shift [ADU]')
_ = plt.ylabel('Counts')
_ = ax.grid()
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(10, 10))
corrected_ave = np.nansum(corrected, axis=(2, 3))
plt.scatter(corrected_ave.flatten()/10**6, blshift.flatten(), s=0.9)
plt.xlim(-1, 1000)
plt.grid()
plt.xlabel('Illuminated corrected [MADU] ')
_ = plt.ylabel('Estimated baseline shift [ADU]')
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Raw preview ###\n'))
display(Markdown(f'Mean over images of the RAW data\n'))
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
data = np.mean(raw[:, 0, ...], axis=0)
vmin, vmax = get_range(data, 5)
ax = geom.plot_data_fast(data, ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
```
%% Cell type:code id: tags:
``` python
display(Markdown(f'Single shot of the RAW data from cell {np.max(cellId[cell_id_preview])} \n'))
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
vmin, vmax = get_range(raw[cell_id_preview, 0, ...], 5)
ax = geom.plot_data_fast(raw[cell_id_preview, 0, ...], ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Corrected preview ###\n'))
display(Markdown(f'A single shot image from cell {np.max(cellId[cell_id_preview])} \n'))
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
vmin, vmax = get_range(corrected[cell_id_preview], 7, -50)
vmin = - 50
ax = geom.plot_data_fast(corrected[cell_id_preview], ax=ax, cmap="jet", vmin=vmin, vmax=vmax)
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
vmin, vmax = get_range(corrected[cell_id_preview], 5, -50)
nbins = np.int((vmax + 50) / 2)
h = ax.hist(corrected[cell_id_preview].flatten(),
bins=nbins, range=(-50, vmax),
histtype='stepfilled', log=True)
_ = plt.xlabel('[ADU]')
_ = plt.ylabel('Counts')
_ = ax.grid()
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Mean CORRECTED Preview ###\n'))
display(Markdown(f'A mean across one train \n'))
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
data = np.mean(corrected, axis=0)
vmin, vmax = get_range(data, 7)
ax = geom.plot_data_fast(data, ax=ax, cmap="jet", vmin=-50, vmax=vmax)
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
vmin, vmax = get_range(corrected, 10, -100)
vmax = np.nanmax(corrected)
if vmax > 50000:
vmax=50000
nbins = np.int((vmax + 100) / 5)
h = ax.hist(corrected.flatten(), bins=nbins,
range=(-100, vmax), histtype='step', log=True, label = 'All')
_ = ax.hist(corrected[gains == 0].flatten(), bins=nbins, range=(-100, vmax),
alpha=0.5, log=True, label='High gain', color='green')
_ = ax.hist(corrected[gains == 1].flatten(), bins=nbins, range=(-100, vmax),
alpha=0.5, log=True, label='Medium gain', color='red')
_ = ax.hist(corrected[gains == 2].flatten(), bins=nbins,
range=(-100, vmax), alpha=0.5, log=True, label='Low gain', color='yellow')
_ = ax.legend()
_ = ax.grid()
_ = plt.xlabel('[ADU]')
_ = plt.ylabel('Counts')
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Maximum GAIN Preview ###\n'))
display(Markdown(f'The per pixel maximum across one train for the digitized gain'))
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
ax = geom.plot_data_fast(np.max(gains, axis=0), ax=ax,
cmap="jet", vmin=-1, vmax=3)
```
%% Cell type:markdown id: tags:
## Bad Pixels ##
The mask contains dedicated entries for all pixels and memory cells as well as all three gain stages. Each mask entry is encoded in 32 bits as:
%% Cell type:code id: tags:
``` python
table = []
for item in BadPixels:
table.append((item.name, "{:016b}".format(item.value)))
md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
headers=["Bad pixel type", "Bit mask"])))
```
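%% Cell type:markdown id: tags:
Several causes can be set at once, so an individual cause is tested with a bitwise AND; a minimal sketch using one of the members listed above:
%% Cell type:code id: tags:
``` python
# decode one mask entry of the preview cell (module 0, pixel (0, 0))
entry = int(mask[cell_id_preview, 0, 0, 0])
print(f"{entry:#034b}", bool(entry & BadPixels.NO_DARK_DATA.value))
```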
%% Cell type:code id: tags:
``` python
display(Markdown(f'### Single Shot Bad Pixels ### \n'))
display(Markdown(f'A single shot bad pixel map from cell {np.max(cellId[cell_id_preview])} \n'))
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
ax = geom.plot_data_fast(np.log2(mask[cell_id_preview]), ax=ax, vmin=0, vmax=32, cmap="jet")
```
%% Cell type:markdown id: tags:
### Percentage of Bad Pixels across one train ###
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
ax = geom.plot_data_fast(np.mean(mask>0, axis=0),
vmin=0, ax=ax, vmax=1, cmap="jet")
```
%% Cell type:markdown id: tags:
### Percentage of Bad Pixels across one train, dark-related only ###
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
cm = np.copy(mask)
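# mask values above NO_DARK_DATA contain non-dark bits; zeroing those entries
# keeps only purely dark-related bad-pixel causes in the average below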
cm[cm > BadPixels.NO_DARK_DATA.value] = 0
ax = geom.plot_data_fast(np.mean(cm>0, axis=0),
vmin=0, ax=ax, vmax=1, cmap="jet")
```
......
import numpy as np
import h5py, os, re
import os
import re
import h5py
import numpy as np
from matplotlib import pylab as plt
......@@ -346,6 +348,8 @@ def returnPositioned2(geometry_file, modules, dquads):
return out
import re
def positionFileList(filelist, datapath, geometry_file, quad_pos, nImages='all'):
import glob
detector = "LPD" if "LPD" in datapath else "AGIPD"
......
import os, struct
import numpy as np
import os
import struct
from collections import OrderedDict
import numpy as np
class Frms6Reader(object):
""" This class allows to access frm6 files
......
import argparse
import asyncio
from datetime import datetime, timedelta
import logging
from datetime import datetime, timedelta
from dateutil import parser, tz
import yaml
import zmq
import zmq.asyncio
from dateutil import parser, tz
async def auto_run(cfg, timeout=3000):
......
import argparse
import asyncio
from asyncio.subprocess import PIPE
import copy
import glob
import logging
import os
import subprocess
from asyncio.subprocess import PIPE
from git import Repo, InvalidGitRepositoryError
import yaml
import zmq
import zmq.asyncio
from git import InvalidGitRepositoryError, Repo
from messages import Errors
loop = asyncio.get_event_loop()
......
from setuptools import setup
from setuptools.command.install import install
from subprocess import check_call, check_output
import sys
from distutils.command.build import build
from Cython.Distutils import build_ext
from distutils.extension import Extension
import sys
from subprocess import check_call, check_output
import numpy
from Cython.Distutils import build_ext
from setuptools import setup
from setuptools.command.install import install
extensions = [Extension("cal_tools.cython.agipdalgs",
['cal_tools/cython/agipdalgs.pyx'],
......@@ -48,6 +48,7 @@ class PreInstallCommand(build):
from xfel_calibrate.notebooks import notebooks
data_files = []
for ctypes in notebooks.values():
for nb in ctypes.values():
......
<?xml version="1.0" encoding="UTF-8"?>
<testsuite errors="1" failures="0" file="unittest/suite.py" name="unittest.suite._ErrorHolder-20210125191730" skipped="0" tests="1" time="0.000" timestamp="0001-01-01T00:00:00">
<testcase classname="setUpClass (__main__" name="TestAGIPDCorrection)" time="0.000" timestamp="0001-01-01T00:00:00">
<error message="Command '['xfel-calibrate', 'AGIPD', 'CORRECT', '--in-folder', '/gpfs/exfel/exp/XMPL/201750/p700001/raw/AGIPD', '--run', '412', '--out-folder', '/gpfs/exfel/exp/XMPL/201750/p700001/scratch/AGIPD', '--calfile', '/gpfs/exfel/exp/XMPL/201750/p700001/usr/agipd_store.h5', '--sequences', '0']' returned non-zero exit status 2." type="CalledProcessError">
<![CDATA[Traceback (most recent call last):
File "/home/danilevc/calibration/pycalibration/tests/correction_base.py", line 295, in setUpClass
out = sp.check_output(cmd)
File "/software/anaconda3/5.2/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/software/anaconda3/5.2/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['xfel-calibrate', 'AGIPD', 'CORRECT', '--in-folder', '/gpfs/exfel/exp/XMPL/201750/p700001/raw/AGIPD', '--run', '412', '--out-folder', '/gpfs/exfel/exp/XMPL/201750/p700001/scratch/AGIPD', '--calfile', '/gpfs/exfel/exp/XMPL/201750/p700001/usr/agipd_store.h5', '--sequences', '0']' returned non-zero exit status 2.
]]> </error>
<system-out>
<![CDATA[Executing xfel-calibrate AGIPD CORRECT --in-folder /gpfs/exfel/exp/XMPL/201750/p700001/raw/AGIPD --run 412 --out-folder /gpfs/exfel/exp/XMPL/201750/p700001/scratch/AGIPD --calfile /gpfs/exfel/exp/XMPL/201750/p700001/usr/agipd_store.h5 --sequences 0
Creating data paths for artefacts
Last git commit is: 9088fa11013afe7bb5d4231fd4170c96e34db520
artefacts will be placed in: /home/danilevc/calibration/pycalibration/tests/artefacts/9088fa11013afe7bb5d4231fd4170c96e34db520/TestAGIPDCorrection_T
]]> </system-out>
<system-err>
<![CDATA[]]> </system-err>
</testcase>
</testsuite>