diff --git a/cal_tools/cal_tools/enums.py b/cal_tools/cal_tools/enums.py
index 0e30568a4683839b1c171d6d8b1bb28611db26c5..ed7a0457f9a896c72e7b32afccf821f4f1e790db 100644
--- a/cal_tools/cal_tools/enums.py
+++ b/cal_tools/cal_tools/enums.py
@@ -5,24 +5,25 @@ class BadPixels(Enum):
     """
     The European XFEL Bad Pixel Encoding
     """
 
-    OFFSET_OUT_OF_THRESHOLD = 0b00000000000000000001 # bit 1
-    NOISE_OUT_OF_THRESHOLD  = 0b00000000000000000010 # bit 2
-    OFFSET_NOISE_EVAL_ERROR = 0b00000000000000000100 # bit 3
-    NO_DARK_DATA            = 0b00000000000000001000 # bit 4
-    CI_GAIN_OF_OF_THRESHOLD = 0b00000000000000010000 # bit 5
-    CI_LINEAR_DEVIATION     = 0b00000000000000100000 # bit 6
-    CI_EVAL_ERROR           = 0b00000000000001000000 # bit 7
-    FF_GAIN_EVAL_ERROR      = 0b00000000000010000000 # bit 8
-    FF_GAIN_DEVIATION       = 0b00000000000100000000 # bit 9
-    FF_NO_ENTRIES           = 0b00000000001000000000 # bit 10
-    CI2_EVAL_ERROR          = 0b00000000010000000000 # bit 11
-    VALUE_IS_NAN            = 0b00000000100000000000 # bit 12
-    VALUE_OUT_OF_RANGE      = 0b00000001000000000000 # bit 13
-    GAIN_THRESHOLDING_ERROR = 0b00000010000000000000 # bit 14
-    DATA_STD_IS_ZERO        = 0b00000100000000000000 # bit 15
-    ASIC_STD_BELOW_NOISE    = 0b00001000000000000000 # bit 16
-    INTERPOLATED            = 0b00010000000000000000 # bit 17
-    NOISY_ADC               = 0b00100000000000000000 # bit 18
-    OVERSCAN                = 0b01000000000000000000 # bit 19
-    NON_SENSITIVE           = 0b10000000000000000000 # bit 20
+    OFFSET_OUT_OF_THRESHOLD = 0b000000000000000000001 # bit 1
+    NOISE_OUT_OF_THRESHOLD  = 0b000000000000000000010 # bit 2
+    OFFSET_NOISE_EVAL_ERROR = 0b000000000000000000100 # bit 3
+    NO_DARK_DATA            = 0b000000000000000001000 # bit 4
+    CI_GAIN_OF_OF_THRESHOLD = 0b000000000000000010000 # bit 5
+    CI_LINEAR_DEVIATION     = 0b000000000000000100000 # bit 6
+    CI_EVAL_ERROR           = 0b000000000000001000000 # bit 7
+    FF_GAIN_EVAL_ERROR      = 0b000000000000010000000 # bit 8
+    FF_GAIN_DEVIATION       = 0b000000000000100000000 # bit 9
+    FF_NO_ENTRIES           = 0b000000000001000000000 # bit 10
+    CI2_EVAL_ERROR          = 0b000000000010000000000 # bit 11
+    VALUE_IS_NAN            = 0b000000000100000000000 # bit 12
+    VALUE_OUT_OF_RANGE      = 0b000000001000000000000 # bit 13
+    GAIN_THRESHOLDING_ERROR = 0b000000010000000000000 # bit 14
+    DATA_STD_IS_ZERO        = 0b000000100000000000000 # bit 15
+    ASIC_STD_BELOW_NOISE    = 0b000001000000000000000 # bit 16
+    INTERPOLATED            = 0b000010000000000000000 # bit 17
+    NOISY_ADC               = 0b000100000000000000000 # bit 18
+    OVERSCAN                = 0b001000000000000000000 # bit 19
+    NON_SENSITIVE           = 0b010000000000000000000 # bit 20
+    NON_LIN_RESPONSE_REGION = 0b100000000000000000000 # bit 21
diff --git a/cal_tools/cal_tools/lpdlib.py b/cal_tools/cal_tools/lpdlib.py
index d0c557aab8964aa847b3aba13d2b063006062254..7b9b8986525d6bc1f976834008a7373540300478 100644
--- a/cal_tools/cal_tools/lpdlib.py
+++ b/cal_tools/cal_tools/lpdlib.py
@@ -41,7 +41,8 @@ class LpdCorrections:
                  raw_fmt_version=2, chunk_size=512,
                  h5_data_path="INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/",
                  h5_index_path="INDEX/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/",
-                 do_ff=True, correct_non_linear=True, karabo_data_mode=False):
+                 do_ff=True, correct_non_linear=True, karabo_data_mode=False,
+                 linear_between=None, mark_non_lin_region=True, nlc_version=2):
         """
         Initialize an LpdCorrections Class
 
@@ -89,6 +90,9 @@ class LpdCorrections:
         self.filter_cells = [0, 1] if channel in filter_modules else []
         self.cnl = True # correct_non_linear
         self.karabo_data_mode = karabo_data_mode
+        self.linear_between = linear_between
+        self.mark_nonlin = mark_non_lin_region
+        self.nlc_version = nlc_version
         # emprically determined from APD datasets p900038, r155,r156
         # emprically determined from APD datasets p900038, r155,r156
         self.cnl_const = {
@@ -252,9 +256,22 @@ class LpdCorrections:
 
         # correct offset
         im -= og
-        
+
+        nlf = 0
+        if self.mark_nonlin and self.linear_between is not None:
+            for gl, lr in enumerate(self.linear_between):
+
+                midx = (gain == gl) & ((im < lr[0]) | (im > lr[1]))
+                msk[midx] = BadPixels.NON_LIN_RESPONSE_REGION.value
+                numnonlin = np.count_nonzero(midx, axis=(1,2))
+                nlf += numnonlin
+        nlf = nlf/float(im.shape[0] * im.shape[1])
+
         # hacky way of smoothening transition region between med and low
-        cfac = 0.314 * np.exp(-im * 0.001)
+
+        cfac = 1
+        if self.nlc_version == 1 and self.cnl:
+            cfac = 0.314 * np.exp(-im * 0.001)
 
         # perform relative gain correction with additional gain-deduced
         # offset
@@ -263,10 +280,11 @@
         im /= self.flatfield[None, :, :]
 
         # hacky way of smoothening transition region between med and low
-        # im[gain == 2] -= im[gain == 2] * cfac[gain == 2]
+        if self.nlc_version == 1 and self.cnl:
+            im[gain == 2] -= im[gain == 2] * cfac[gain == 2]
 
         # perform non-linear corrections if requested
-        if self.cnl:
+        if self.cnl and self.nlc_version == 2:
             def lin_exp_fun(x, m, b, A, lam, c):
                 return m * x + b + A * np.exp(lam * (x - c))
 
@@ -287,7 +305,7 @@
             cf = lin_exp_fun(x, cnl['m'], cnl['b'], cnl['A'], cnl['lam'],
                              cnl['c'])
             im[(gain == 2)] -= np.minimum(cf, 0.45) * x
-            
+
         # create bad pixels masks, here non-finite values
         bidx = ~np.isfinite(im)
         im[bidx] = 0
@@ -332,6 +350,8 @@
             self.outfile[lpd_base + "image/pulseId"][cidx:nidx] = pulseId
             self.outfile[lpd_base + "image/status"][cidx:nidx] = status
             self.outfile[lpd_base + "image/length"][cidx:nidx] = length
+            if self.mark_nonlin:
+                self.outfile[lpd_base + "image/nonLinear"][cidx:nidx] = nlf
             self.cidx = nidx
         else:
             irange['image.data'] = im
@@ -522,6 +542,10 @@
                                     dtype=np.uint16, fletcher32=True)
         self.outfile.create_dataset(lpdbase + "image/length", fsz,
                                     dtype=np.uint32, fletcher32=True)
+
+        if self.mark_nonlin:
+            self.outfile.create_dataset(lpdbase + "image/nonLinear", fsz,
+                                        dtype=np.float32, fletcher32=True)
 
     def get_histograms(self):
         """ Return preview histograms computed from the first chunk
diff --git a/notebooks/LPD/LPD_Correct_and_Verify.ipynb b/notebooks/LPD/LPD_Correct_and_Verify.ipynb
index 8cb6da49544621c71457d3ee5aa5f3f195dcd104..d09bc106d3d1306c11328e3e507940429a38cbbc 100644
--- a/notebooks/LPD/LPD_Correct_and_Verify.ipynb
+++ b/notebooks/LPD/LPD_Correct_and_Verify.ipynb
@@ -20,9 +20,9 @@
    },
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/FXE/201931/p900088/raw/\" # the folder to read data from, required\n",
+    "in_folder = \"/gpfs/exfel/exp/FXE/201802/p002218/raw/\" # the folder to read data from, required\n",
     "run = 115 # runs to process, required\n",
-    "out_folder = \"/gpfs/exfel/exp/FXE/201931/p900088/proc/\" # the folder to output to, required\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/xcal/lpd_test/exclude\" # the folder to output to, required\n",
     "calfile = \"/gpfs/exfel/data/scratch/xcal/lpd_store_0519.h5\" # path to constants extracted from the db into a file\n",
     "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "mem_cells = 512 # memory cells in data\n",
@@ -47,6 +47,11 @@
     "beam_center_offset = [1.5, 1] # offset from the beam center, MAR 2018\n",
     "sequences_per_node = 1 # sequence files to process per node\n",
     "timeout_cal_db = 30000 # timeout for calibration db requests in milliseconds\n",
+    "dont_mark_non_lin_region = False # do not mark non-linear regions in BP map\n",
+    "linear_between_high_gain = [-5000, 2500] # region in which high gain is considered linear, in ADU\n",
+    "linear_between_med_gain = [300, 3000] # region in which medium gain is considered linear, in ADU\n",
+    "linear_between_low_gain = [300, 3000] # region in which low gain is considered linear, in ADU\n",
+    "nlc_version = 2 # version of NLC to use\n",
     "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node):\n",
     "    import glob\n",
@@ -155,7 +160,10 @@
     "logger = InfluxLogger(detector=\"LPD\", instrument=instrument, mem_cells=mem_cells,\n",
     "                      notebook=get_notebook_name(), proposal=proposal)\n",
     "\n",
-    "client = InfluxDBClient('exflqr18318', 8086, 'root', 'root', 'calstats')"
+    "client = InfluxDBClient('exflqr18318', 8086, 'root', 'root', 'calstats')\n",
+    "\n",
+    "mark_non_lin_region = not dont_mark_non_lin_region\n",
+    "linear_between = [linear_between_high_gain, linear_between_med_gain, linear_between_low_gain]"
    ]
   },
  {
@@ -271,7 +279,8 @@
     "from functools import partial\n",
     "def correct_module(max_cells, do_ff, index_v, CHUNK_SIZE, total_sequences, sequences_qm, \n",
     "                   bins_gain_vs_signal, bins_signal_low_range, bins_signal_high_range, max_pulses,\n",
-    "                   dbparms, fileparms, nodb, no_non_linear_corrections, inp):\n",
+    "                   dbparms, fileparms, nodb, no_non_linear_corrections, mark_non_lin_region, linear_between,\n",
+    "                   nlc_version, inp):\n",
     "    import numpy as np\n",
     "    import copy\n",
     "    import h5py\n",
@@ -333,7 +342,9 @@
     "    lpd_corr = LpdCorrections(infile, outfile, max_cells, channel, max_pulses,\n",
     "                              bins_gain_vs_signal, bins_signal_low_range,\n",
     "                              bins_signal_high_range, do_ff=do_ff, raw_fmt_version=index_v,\n",
-    "                              correct_non_linear=(not no_non_linear_corrections))\n",
+    "                              correct_non_linear=(not no_non_linear_corrections),\n",
+    "                              mark_non_lin_region=mark_non_lin_region, linear_between=linear_between,\n",
+    "                              nlc_version=nlc_version)\n",
     "    \n",
     "    try:\n",
     "        lpd_corr.get_valid_image_idx() \n",
@@ -410,7 +421,7 @@
     "    print(\"Running {} tasks parallel\".format(len(inp)))\n",
     "    p = partial(correct_module, max_cells, do_ff, index_v, CHUNK_SIZE, total_sequences, sequences_qm,\n",
     "                bins_gain_vs_signal, bins_signal_low_range, bins_signal_high_range, max_pulses, dbparms,\n",
-    "                fileparms, nodb, no_non_linear_corrections)\n",
+    "                fileparms, nodb, no_non_linear_corrections, mark_non_lin_region, linear_between, nlc_version)\n",
     "    \n",
     "    r = view.map_sync(p, inp)\n",
     "    #r = list(map(p, inp))\n",