
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: calibration/pycalibration
Commits on Source (20)
@@ -5,10 +5,16 @@ from typing import Any, Dict, Optional, Tuple
 import h5py
 import numpy as np
 import sharedmem
-from cal_tools.agipdutils import *
+from cal_tools.agipdutils import (assemble_constant_dict,
+                                  baseline_correct_via_noise,
+                                  baseline_correct_via_stripe,
+                                  correct_baseline_via_hist,
+                                  correct_baseline_via_hist_asic,
+                                  make_noisy_adc_mask, match_asic_borders,
+                                  melt_snowy_pixels)
 from cal_tools.enums import BadPixels, SnowResolution
 from cal_tools.tools import get_constant_from_db_and_time
-from iCalibrationDB import Conditions, Constants, Detectors
+from iCalibrationDB import Conditions, Constants
 from cal_tools.cython import agipdalgs as calgs
@@ -80,7 +86,8 @@ def get_acq_rate(fast_paths: Tuple[str, str, int],
 def get_gain_setting(fname: str, h5path_ctrl: str) -> int:
-    """
+    """Retrieve Gain setting.
     If the data is available from the middlelayer FPGA_COMP device, then it is
     retrieved from there.
     If not, the setting is calculated off `setupr` and `patternTypeIndex`
@@ -160,8 +167,8 @@ class AgipdCorrections:
         image/data section
     :param h5_index_path: path in HDF5 file which is prefixed to the
         index section
-    :param corr_bools: A dict with all of the correction booleans requested or
-        available
+    :param corr_bools: A dict with all of the correction booleans requested
+        or available
     The following example shows a typical use case:
     .. code-block:: python
@@ -203,7 +210,7 @@ class AgipdCorrections:
         self.rng_pulses = max_pulses
         # avoid list(range(*[0]]))
         self.pulses_lst = list(range(*max_pulses)) \
-            if not (len(max_pulses) == 1 and max_pulses[0] == 0) else max_pulses #noqa
+            if not (len(max_pulses) == 1 and max_pulses[0] == 0) else max_pulses  # noqa
         self.max_cells = max_cells
         # Correction parameters
@@ -498,11 +505,11 @@ class AgipdCorrections:
         # force into high or medium gain if requested
         if self.corr_bools.get('force_mg_if_below'):
             gain[(gain == 2) & (
                 (data - offsetb[1]) < self.mg_hard_threshold)] = 1
         if self.corr_bools.get('force_hg_if_below'):
             gain[(gain > 0) & (
                 (data - offsetb[0]) < self.hg_hard_threshold)] = 0
         # choose constants according to gain setting
         off = calgs.gain_choose(gain, offsetb)
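The two hard thresholds above force pixels into medium or high gain before per-pixel constants are looked up; calgs.gain_choose is the repository's Cython helper for that lookup. As an illustration only, the same per-pixel selection can be sketched in plain NumPy (shapes and values below are made up):

    import numpy as np

    # Hypothetical per-stage offsets (3 gain stages) for a 4 x 5 pixel block.
    offsetb = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5)
    # Hypothetical gain stage (0 = high, 1 = medium, 2 = low) per pixel.
    gain = np.random.randint(0, 3, size=(4, 5))

    # For every pixel, pick the offset belonging to its gain stage
    # (the role calgs.gain_choose plays in optimized Cython).
    off = np.choose(gain, offsetb)

    # Equivalent formulation with take_along_axis.
    off2 = np.take_along_axis(offsetb, gain[None, ...], axis=0)[0]
    assert np.allclose(off, off2)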
@@ -512,7 +519,7 @@ class AgipdCorrections:
         data -= off
         del off
-    def baseline_correction(self, i_proc:int, first:int, last:int):
+    def baseline_correction(self, i_proc: int, first: int, last: int):
         """
         Perform image-wise base-line shift correction for
         data in shared memory via histogram or stripe
@@ -536,14 +543,12 @@ class AgipdCorrections:
         # output is saved in sharedmem to pass for correct_agipd()
         # as this function takes about 3 seconds.
         self.shared_dict[i_proc]['msk'][first:last] = \
-            calgs.gain_choose_int(gain,
-                                  self.mask[module_idx][:, cellid])  # noqa
+            calgs.gain_choose_int(gain, self.mask[module_idx][:, cellid])
         if hasattr(self, "rel_gain"):
             # Get the correct rel_gain depending on cell-id
             self.shared_dict[i_proc]['rel_corr'][first:last] = \
-                calgs.gain_choose(gain,
-                                  self.rel_gain[module_idx][:, cellid])  # noqa
+                calgs.gain_choose(gain, self.rel_gain[module_idx][:, cellid])
         # do this image wise, as the shift is per image
         for i in range(data.shape[0]):
@@ -647,9 +652,9 @@ class AgipdCorrections:
         # after calculating it while offset correcting.
         if self.corr_bools.get('melt_snow'):
             _ = melt_snowy_pixels(self.shared_dict[i_proc]['raw_data'][first:last],  # noqa
                                   data, gain,
                                   self.shared_dict[i_proc]['t0_rgain'][first:last],  # noqa
                                   self.snow_resolution)
         # Inner ASIC borders are matched to the same signal level
         if self.corr_bools.get("match_asics"):
@@ -712,7 +717,7 @@ class AgipdCorrections:
             valid_indices = np.concatenate([np.arange(validf[i],
                                                       validf[i]+validc[i])
                                             for i in range(validf.size)],
                                            axis=0)
             valid_indices = np.squeeze(valid_indices).astype(np.int32)
         elif index_v == 1:
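The index section stores per-train first/count pairs, and the concatenation in this hunk expands them into a flat list of valid frame indices. A short illustration with invented first/count arrays:

    import numpy as np

    validf = np.array([0, 10, 25])  # first valid frame of each train (made up)
    validc = np.array([3, 2, 4])    # number of valid frames per train (made up)

    valid_indices = np.concatenate(
        [np.arange(validf[i], validf[i] + validc[i]) for i in range(validf.size)],
        axis=0)
    valid_indices = np.squeeze(valid_indices).astype(np.int32)

    print(valid_indices)  # [ 0  1  2 10 11 25 26 27 28]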
@@ -753,8 +758,8 @@ class AgipdCorrections:
         allpulses = data_dict['pulseId'][:n_img]
         # Initializing can_calibrate array
-        can_calibrate = self.choose_selected_pulses(allpulses,
-                                                    can_calibrate=[True]*len(allpulses))
+        can_calibrate = self.choose_selected_pulses(
+            allpulses, can_calibrate=[True]*len(allpulses))
         # Only select data corresponding to selected pulses
         # and overwrite data in shared-memory leaving
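choose_selected_pulses, called above with an all-True mask, validates the requested (first, last, step) pulse range and flags which images survive the selection; parts of the real helper appear in later hunks. A minimal, hypothetical sketch of that masking logic:

    import numpy as np

    def select_pulses(allpulses: np.ndarray, first_pulse: int,
                      last_pulse: int, pulse_step: int) -> np.ndarray:
        """Boolean mask of images whose pulse id is in the selected range."""
        # Pulses the caller asked to calibrate, as a slice over the pulse ids.
        cal_pulses = allpulses[first_pulse:last_pulse:pulse_step]
        # Keep only images whose pulse id belongs to that selection.
        return np.isin(allpulses, cal_pulses)

    allpulses = np.repeat(np.arange(0, 352, 4), 2)  # made-up pulse id pattern
    mask = select_pulses(allpulses, 0, 20, 2)
    print(mask.sum(), "of", mask.size, "images selected")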
@@ -779,7 +784,7 @@ class AgipdCorrections:
         return n_img
     def validate_selected_pulses(self, allpulses: np.array
                                  ) -> Tuple[int, int, int]:
         """Validate the selected pulses given from the notebook
         Validate that the given range of pulses to correct
@@ -816,7 +821,6 @@ class AgipdCorrections:
     def choose_selected_pulses(self, allpulses: np.array,
                                can_calibrate: np.array) -> np.array:
         """
         Choose given selected pulse from pulseId array of
         raw data. The selected pulses range is validated then
@@ -831,7 +835,7 @@ class AgipdCorrections:
         """
         (first_pulse, last_pulse,
          pulse_step) = self.validate_selected_pulses(allpulses)
         # collect the pulses to be calibrated
         cal_pulses = allpulses[first_pulse: last_pulse: pulse_step]
@@ -853,7 +857,8 @@ class AgipdCorrections:
         return can_calibrate
     def gen_valid_range(self, first_index: int, last_index: int,
-                        max_cells: int, allcells: np.array, allpulses: np.array,
+                        max_cells: int, allcells: np.array,
+                        allpulses: np.array,
                         valid_indices: Optional[np.array] = None,
                         apply_sel_pulses: Optional[bool] = True
                         ) -> np.array:
@@ -890,8 +895,8 @@ class AgipdCorrections:
             return
         if apply_sel_pulses:
-            can_calibrate = self.choose_selected_pulses(allpulses,
-                                                        can_calibrate=can_calibrate)
+            can_calibrate = self.choose_selected_pulses(
+                allpulses, can_calibrate=can_calibrate)
         if valid_indices is None:
             firange = np.arange(first_index, last_index)
         else:
@@ -1075,7 +1080,7 @@ class AgipdCorrections:
         self.offset[module_idx][...] = cons_data["Offset"].transpose()[...]
         self.noise[module_idx][...] = cons_data["Noise"].transpose()[...]
-        self.thresholds[module_idx][...] = cons_data["ThresholdsDark"].transpose()[:3,...]  # noqa
+        self.thresholds[module_idx][...] = cons_data["ThresholdsDark"].transpose()[:3, ...]  # noqa
         if self.corr_bools.get("low_medium_gap"):
             t0 = self.thresholds[module_idx][0]
@@ -1090,7 +1095,7 @@ class AgipdCorrections:
                                          :bpixels.shape[2],  # noqa
                                          None]
         if when["SlopesFF"]:  # Checking if constant was retrieved
             slopesFF = cons_data["SlopesFF"]
             # This could be used for backward compatibility
@@ -1100,18 +1105,20 @@ class AgipdCorrections:
             # This is for backward compatability for old FF constants
             # (128, 512, mem_cells)
             if slopesFF.shape[-1] == 2:
-                xray_cor = np.squeeze(slopesFF[...,0])
+                xray_cor = np.squeeze(slopesFF[..., 0])
                 xray_cor_med = np.nanmedian(xray_cor)
-                xray_cor[np.isnan(xray_cor)]= xray_cor_med
-                xray_cor[(xray_cor<0.8) | (xray_cor>1.2)] = xray_cor_med
+                xray_cor[np.isnan(xray_cor)] = xray_cor_med
+                xray_cor[(xray_cor < 0.8) | (
+                    xray_cor > 1.2)] = xray_cor_med
                 xray_cor = np.dstack([xray_cor]*self.max_cells)
             else:
                 # Memory cell resolved xray_cor correction
                 xray_cor = slopesFF  # (128, 512, mem_cells)
                 if xray_cor.shape[-1] < self.max_cells:
-                    # In case of having new constant with less memory cells,
-                    # due to lack of enough FF data or during development.
-                    # xray_cor should be expanded by last memory cell.
+                    # When working with new constant with fewer memory
+                    # cells, eg. lacking enough FF data or during
+                    # development, xray_cor must be expand its last memory
+                    # cell to maintain a consistent shape.
                     xray_cor = np.dstack(xray_cor,
                                          np.dstack([xray_cor[..., -1]]
                                                    * (self.max_cells - xray_cor.shape[-1])))  # noqa
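The rewritten comment describes padding a flat-field constant that covers fewer memory cells than the run by repeating its last cell up to max_cells. A minimal sketch of that padding, written here with np.concatenate rather than the module's np.dstack call, and with made-up dimensions:

    import numpy as np

    max_cells = 352
    xray_cor = np.random.rand(128, 512, 250)  # constant with fewer memory cells

    if xray_cor.shape[-1] < max_cells:
        # Repeat the last available memory cell until the constant spans max_cells.
        pad = max_cells - xray_cor.shape[-1]
        last_cell = np.repeat(xray_cor[..., -1:], pad, axis=-1)
        xray_cor = np.concatenate([xray_cor, last_cell], axis=-1)

    assert xray_cor.shape == (128, 512, max_cells)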
@@ -1151,11 +1158,11 @@ class AgipdCorrections:
             pc_med_l = slopesPC[..., :self.max_cells, 4]
             # calculate median for slopes
-            pc_high_med = np.nanmedian(pc_high_m, axis=(0,1))
-            pc_med_med = np.nanmedian(pc_med_m, axis=(0,1))
+            pc_high_med = np.nanmedian(pc_high_m, axis=(0, 1))
+            pc_med_med = np.nanmedian(pc_med_m, axis=(0, 1))
             # calculate median for intercepts:
-            pc_high_l_med = np.nanmedian(pc_high_l, axis=(0,1))
-            pc_med_l_med = np.nanmedian(pc_med_l, axis=(0,1))
+            pc_high_l_med = np.nanmedian(pc_high_l, axis=(0, 1))
+            pc_med_l_med = np.nanmedian(pc_med_l, axis=(0, 1))
             # sanitize PC data
             # (it should be done already on the level of constants)
......
 import copy
+from typing import Tuple
 import numpy as np
 from cal_tools.enums import BadPixels, SnowResolution
-from scipy.signal import cwt, find_peaks_cwt, ricker
+from scipy.signal import cwt, ricker
 from sklearn.mixture import GaussianMixture
 from sklearn.preprocessing import StandardScaler
@@ -249,8 +250,10 @@ def correct_baseline_via_hist(d, pcm, g):
             return d, 0
         it += 1
-    def min_hist_distance(pc, bins=100, ran=(-10000, 10000), dec=20,
-                          minbin=10):
+    def min_hist_distance(pc: int,
+                          bins: int = 100,
+                          ran: Tuple[int, int] = (-10000, 10000),
+                          minbin: int = 10) -> float:
         hh, e = np.histogram(dd[g == 0] - pc, bins=bins, range=ran)
         hm, e = np.histogram((dd[g == 1] - pc) * pcm[g == 1], bins=bins,
                              range=ran)
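The nested min_hist_distance helper scores a trial baseline shift pc by histogramming the high-gain signal and the gain-corrected medium-gain signal and comparing the two distributions; only the histogramming is visible in this hunk, so the metric below is an assumption (sum of squared bin differences), not the module's actual formula:

    from typing import Tuple

    import numpy as np

    def hist_distance(dd: np.ndarray, g: np.ndarray, pcm: np.ndarray,
                      pc: int, bins: int = 100,
                      ran: Tuple[int, int] = (-10000, 10000)) -> float:
        """Score a trial baseline shift by comparing gain-stage histograms."""
        hh, _ = np.histogram(dd[g == 0] - pc, bins=bins, range=ran)
        hm, _ = np.histogram((dd[g == 1] - pc) * pcm[g == 1], bins=bins, range=ran)
        # Assumed metric: squared difference between the binned distributions.
        return float(np.sum((hh - hm) ** 2))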
......
@@ -275,7 +275,9 @@ def get_dir_creation_date(directory: Union[str, Path], run: int,
             rfile = sorted(rfiles, key=path.getmtime)[0]
             with h5py.File(rfile, 'r') as fin:
                 cdate = fin['METADATA/creationDate'][0].decode()
-                cdate = datetime.datetime.strptime(cdate, "%Y%m%dT%H%M%SZ")
+                cdate = datetime.datetime.strptime(
+                    cdate,
+                    "%Y%m%dT%H%M%SZ").replace(tzinfo=datetime.timezone.utc)
             return cdate
         except (IndexError, IOError, ValueError):
             ntries -= 1
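The change makes the returned creation date timezone-aware by attaching UTC explicitly, which is what the adjusted test further below expects. A small sketch of the difference, using the timestamp from that test:

    import datetime

    raw = "20200923T133050Z"
    naive = datetime.datetime.strptime(raw, "%Y%m%dT%H%M%SZ")
    aware = naive.replace(tzinfo=datetime.timezone.utc)

    print(str(naive))  # 2020-09-23 13:30:50
    print(str(aware))  # 2020-09-23 13:30:50+00:00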
......
@@ -20,7 +20,7 @@ def test_dir_creation_date():
     date = get_dir_creation_date(folder, 9983)
     assert isinstance(date, datetime)
-    assert str(date) == '2020-09-23 13:30:50'
+    assert str(date) == '2020-09-23 13:30:50+00:00'
     with pytest.raises(ValueError) as e:
         get_dir_creation_date(folder, 4)
......
@@ -13,6 +13,7 @@ class Errors:
     MDC_RESPONSE = "FAILED: Response error from MDC: {}"
     NOT_CONFIGURED = "FAILED: instrument not configured, please contact det-support@xfel.eu"
     NOT_SUBMITTED = "FAILED: correction of {} failed during submision, please contact det-support@xfel.eu"
+    OTHER_ERROR = "FAILED: Error {}, please contact det-support@xfel.eu"
 class MDC:
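The new OTHER_ERROR entry follows the same str.format convention as the existing messages; a small usage sketch (the webservice code that would call it is not part of this diff):

    class Errors:
        OTHER_ERROR = "FAILED: Error {}, please contact det-support@xfel.eu"

    try:
        raise RuntimeError("unexpected detector state")  # hypothetical failure
    except Exception as exc:
        print(Errors.OTHER_ERROR.format(exc))
        # FAILED: Error unexpected detector state, please contact det-support@xfel.eu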
......
This diff is collapsed.