Commit f26be67a authored by Karim Ahmed

put back chunk_size

parent af019974
1 merge request: !841 [JUNGFRAU][FF] Feat: new notebook for producing gain constants.
%% Cell type:markdown id: tags:
# Histogram of single photon spectra
Author: European XFEL Detector Group, Version: 1.0
Creates histograms from flat-field data, stores them in HDF5 files, and then performs fitting based on the selected `fit_func`.
Histograms are filled on a pixel-by-pixel, memory-cell-by-memory-cell basis to save memory and space; the histogram range should be optimized around the desired peak.
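%% Cell type:markdown id: tags:
Conceptually, each (memory cell, row, column) pixel accumulates its own 1-D spectrum over all trains. A minimal sketch of that idea with toy shapes (illustrative only; the notebook's actual implementation is `jungfrau_ff.fill_histogram`):
``` python
# Toy-shaped sketch of per-pixel, per-cell histogramming; not the notebook's
# jungfrau_ff.fill_histogram, just the underlying idea.
import numpy as np

trains, cells, rows, cols = 100, 1, 8, 8
adc = np.random.normal(200., 30., (trains, cells, rows, cols)).astype(np.float32)
bins = np.linspace(-50., 450., 251)  # 250 bins of 2 ADC units each

hist = np.zeros((len(bins) - 1, cells, rows, cols), dtype=np.int32)
for c in range(cells):
    for r in range(rows):
        for k in range(cols):
            counts, _ = np.histogram(adc[:, c, r, k], bins=bins)
            hist[:, c, r, k] += counts  # accumulate across files/chunks
```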
%% Cell type:code id: tags:
``` python
in_folder = '/gpfs/exfel/exp/SPB/202330/p900343/raw' # RAW data path, required
out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/remove/JF4M_SPB_gain/r66/no_offset' # Output path for gain data, required
metadata_folder = '.' # Directory containing calibration_metadata.yml when run by xfel-calibrate
runs = [66] # can be a list of runs
sensor_size = [512, 1024] # size of the array in the 'row' and 'col' dimensions
chunked_trains = 1000 # Number of trains per chunk.
gains = [0, 1, 2] # gain bit values
karabo_id = "SPB_IRDA_JF4M" # karabo prefix of Jungfrau devices
karabo_da = [""] # Detector's data aggregators. Leave empty to derive from CALCAT.
creation_time = '' # To overwrite the measured creation_time. Required format: YYYY-MM-DD HR:MN:SC, e.g. "2022-06-28 13:00:00"

# Parameter conditions
bias_voltage = -1 # bias voltage - Set to -1 to derive from CONTROL file.
integration_time = -1. # integration time - Set to -1 to derive from CONTROL file.
gain_mode = 0 # gain mode - Set to -1 to derive from CONTROL file.
gain_setting = -1 # gain setting parameter conditions (High or low CDS) - Set to -1 to derive from CONTROL file.
memory_cells = -1 # number of memory cells - Set to -1 to derive from CONTROL file.
sc_start = -1 # storage cell start value - should be derived from CONTROL file.
h_bins_s = 2 # bin width in ADC units of the histogram
h_range = (-50, 450.) # histogram range in ADC units
block_size = [256, 64] # dimension of the chunks in 'row' and 'col'
control_src_template = '{}/DET/CONTROL'
extra_dims = ['cell', 'row', 'col'] # labels for the DataArray dims after the first
fit_func = 'CHARGE_SHARING' # function used to fit the single photon peak
fit_h_range = (150, 350.) # range of the histogram in x-axis units used in fitting
chunk_size = 10 # number of trains per chunk passed to jungfrau_ff.chunk_trains below
off_sub = True # assumed flag: perform offset subtraction before filling histograms (used below but not defined in this diff)

# parameters for the peak finder
n_sigma = 5. # n of sigma above pedestal threshold
ratio = 0.99 # ratio of the next peak amplitude in the peak_finder
rebin = 1 # rebinning factor applied to the histogram before fitting
def find_das(in_folder, runs, karabo_da):
    run = runs[0]
    if (karabo_da is not None) and karabo_da != [""]:
        return karabo_da

    from pathlib import Path
    import re

    karabo_da = set()
    for file in Path(in_folder, f'r{run:04d}').iterdir():
        da = re.search(r'JNGFR\d{2}', file.name)
        if da:
            karabo_da.add(da.group())
    return sorted(karabo_da)
```
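%% Cell type:markdown id: tags:
For context, a usage sketch of the helper above (presumably invoked by the xfel-calibrate machinery; the printed list is illustrative):
``` python
# Hypothetical direct call: scan r0066 for files whose names contain
# aggregator tags like JNGFR01 and return them sorted.
das = find_das('/gpfs/exfel/exp/SPB/202330/p900343/raw', [66], [""])
print(das)  # e.g. ['JNGFR01', 'JNGFR02', ...]
```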
%% Cell type:code id: tags:
``` python
import multiprocessing
import time
from functools import partial
from logging import warning
from pathlib import Path

import numpy as np
import pasha as psh
from extra_data import H5File, RunDirectory, open_run
from h5py import File as h5file
from XFELDetAna.detectors.jungfrau.util import count_n_files

import cal_tools.restful_config as rest_cfg
from cal_tools.calcat_interface import JUNGFRAU_CalibrationData
from cal_tools.jungfrau import jungfrau_ff
from cal_tools.jungfrau.jungfraulib import JungfrauCtrl
from cal_tools.step_timing import StepTimer
from cal_tools.tools import calcat_creation_time
```
%% Cell type:code id: tags:
``` python
proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]
in_folder = Path(in_folder)
out_folder = Path(out_folder)
out_folder.mkdir(exist_ok=True, parents=True)
print(f'Creating directory {out_folder}')
```
%% Cell type:code id: tags:
``` python
begin_stuff = time.localtime()
control_src = control_src_template.format(karabo_id, karabo_da[0])
first_run_folder = in_folder / f'r{runs[0]:04d}'
ctrl_data = JungfrauCtrl(RunDirectory(first_run_folder), control_src)

# Run's creation time:
creation_time = calcat_creation_time(in_folder, runs[0], creation_time)
print(f"Creation time: {creation_time}")
print("Chunked trains: ", chunked_trains)
```
%% Cell type:code id: tags:
``` python
def read_detector_conditions(
    run_dc,
    run_number,
    bias_voltage,
    memory_cells,
    integration_time,
    gain_mode,
    gain_setting,
):
    """Get parameter conditions for a run.
    Args:
        run_dc(extra_data.DataCollection): data collection of the run.
        run_number(int): run number.
        bias_voltage(float): bias voltage, or -1 to read it from the CONTROL source.
        memory_cells(int): number of memory cells, or -1 to read it from the CONTROL source.
        integration_time(float): integration time, or -1 to read it from the CONTROL source.
        gain_mode(int): gain mode, or -1 to read it from the CONTROL source.
        gain_setting(int): gain setting, or -1 to read it from the CONTROL source.
    Returns:
        float, int, int, float, int, int, int
    """

    ## Retrieve DB constants
    ctrl_data = JungfrauCtrl(run_dc, control_src)

    if memory_cells < 0:
        memory_cells, sc_start = ctrl_data.get_memory_cells()
    else:
        # Avoid an UnboundLocalError when memory_cells is given explicitly;
        # sc_start keeps its notebook-level default in that case.
        sc_start = -1
    if integration_time < 0:
        integration_time = ctrl_data.get_integration_time()
    if bias_voltage < 0:
        bias_voltage = ctrl_data.get_bias_voltage()
    if gain_setting < 0:
        gain_setting = ctrl_data.get_gain_setting()
    if gain_mode < 0:
        gain_mode = ctrl_data.get_gain_mode()

    return (
        bias_voltage,
        memory_cells,
        sc_start,
        integration_time,
        gain_mode,
        gain_setting,
        run_number,
    )


condition_lists = {
    "integration_time": [],
    "memory_cells": [],
    "sc_start": [],
    "bias_voltage": [],
    "gain_setting": [],
    "gain_mode": [],
    "runs": [],
}

for run in runs:
    voltage, cells, sc_start, integration, mode, setting, run_number = read_detector_conditions(  # noqa
        RunDirectory(in_folder / f"r{run:04d}"),
        run,
        bias_voltage=bias_voltage,
        memory_cells=memory_cells,
        integration_time=integration_time,
        gain_mode=gain_mode,
        gain_setting=gain_setting)

    condition_lists["bias_voltage"].append(voltage)
    condition_lists["memory_cells"].append(cells)
    condition_lists["integration_time"].append(integration)
    condition_lists["gain_mode"].append(mode)
    condition_lists["gain_setting"].append(setting)
    condition_lists["sc_start"].append(sc_start)
    condition_lists["runs"].append(run_number)  # record the run so mismatch warnings can name it

# Validate run conditions
runs_results = condition_lists["runs"]
for c, lst in condition_lists.items():
    if c == "runs":
        continue
    if not all(val == lst[0] for val in lst):
        warning(
            f"{c} is not the same for all runs: {lst} "
            f"for runs {runs_results}, respectively"
        )

memory_cells = condition_lists["memory_cells"][0]
sc_start = condition_lists["sc_start"][0]
print(f"Memory cells: {memory_cells:d}")
bias_voltage = condition_lists["bias_voltage"][0]
print(f"Bias Voltage: {bias_voltage:3.1f} V")
integration_time = condition_lists["integration_time"][0]
print(f"Exposure Time: {integration_time:1.2f} us")
gain_mode = condition_lists["gain_mode"][0]
print(f"Gain mode: {gain_mode}")
gain_setting = condition_lists["gain_setting"][0]
print(f"Gain setting: {gain_setting}")
```
%% Cell type:code id: tags:
``` python
# Retrieve calibration constants
jf_cal = JUNGFRAU_CalibrationData(
    detector_name=karabo_id,
    sensor_bias_voltage=bias_voltage,
    event_at=creation_time,
    modules=karabo_da,
    memory_cells=memory_cells,
    integration_time=integration_time,
    gain_setting=gain_setting,
    gain_mode=gain_mode,
    client=rest_cfg.calibration_client(),
)
jf_metadata = jf_cal.metadata(
    calibrations=["Offset10Hz", "Noise10Hz"])
# TODO: display CCV timestamp
const_data = jf_cal.ndarray_map(metadata=jf_metadata)
jf_cal.display_markdown_retrieved_constants(metadata=jf_metadata)

for mod in karabo_da:
    constants_data = const_data[mod]

    if "Offset10Hz" in constants_data:
        # From (512, 1024, 1, 3) to (3, 1, 512, 1024)
        constants_data["Offset10Hz"] = np.transpose(
            np.swapaxes(constants_data["Offset10Hz"], 0, 1)).astype(np.float32)
    else:
        # Create offset empty constant.
        constants_data["Offset10Hz"] = np.zeros(
            (3, memory_cells, 512, 1024),
            dtype=np.float32,
        )
        warning(f"Offset map is not found for {mod}")

    if "Noise10Hz" in constants_data:
        # From (512, 1024, 1, 3) to (3, 1, 512, 1024)
        constants_data["Noise10Hz"] = np.transpose(
            np.swapaxes(constants_data["Noise10Hz"], 0, 1)).astype(np.float32)
    else:
        constants_data["Noise10Hz"] = None
        warning(f"Noise map is not found for {mod}")
```
%% Cell type:markdown id: tags:
## Filling the histogram
* first it will retrieve DB offsets for a given run
* on a file-by-file basis it will perform offset subtraction and fill the histogram (a minimal sketch of this step follows below)
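A minimal sketch of the per-chunk offset subtraction, assuming `adc`/`gain` arrays of shape `(trains, cells, rows, cols)` and an offset map of shape `(3, cells, rows, cols)`; the authoritative implementation is `jungfrau_ff.subtract_offset`, whose signature is not shown in this diff:
``` python
# Illustrative offset subtraction; not the notebook's jungfrau_ff.subtract_offset.
import numpy as np

def subtract_offset_sketch(adc, gain, offset_map):
    """Subtract, per pixel, the offset of the gain stage it was measured in."""
    corrected = adc.copy()
    for g in range(3):
        mask = gain == g  # pixels recorded in gain stage g
        # offset_map[g] has shape (cells, rows, cols) and broadcasts over trains.
        corrected = np.where(mask, corrected - offset_map[g], corrected)
    return corrected

# Tiny self-check with toy data:
adc = np.full((10, 1, 4, 4), 100., np.float32)
gain = np.zeros((10, 1, 4, 4), np.int16)
offsets = np.zeros((3, 1, 4, 4), np.float32)
offsets[0] = 90.
print(subtract_offset_sketch(adc, gain, offsets)[0, 0, 0, 0])  # -> 10.0
```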
%% Cell type:markdown id: tags:
### Creating empty histogram
%% Cell type:code id: tags:
``` python
h_n_bins = int((h_range[1] - h_range[0])/h_bins_s)
# np.linspace yields h_n_bins edges, i.e. h_n_bins - 1 bins, matching the
# first axis of the histogram arrays below.
h_bins = np.linspace(h_range[0], h_range[1], h_n_bins)

h_spectra = dict()
edges = dict()
for da in karabo_da:
    h_spectra[da] = np.zeros(
        (h_n_bins-1, memory_cells, sensor_size[0], sensor_size[1]),
        dtype=np.int32,
    )
    edges[da] = None
```
%% Cell type:code id: tags:
``` python
step_timer = StepTimer()
n_cpus = multiprocessing.cpu_count()
context = psh.context.ProcessContext(num_workers=n_cpus)
print(f"Using {n_cpus} workers for correction.")
```
%% Cell type:code id: tags:
``` python
for run in runs:
    for da in karabo_da:
        det_src = f'{karabo_id}/DET/{da}:daqOutput'
        selection = {
            det_src: {'data.adc', 'data.gain', 'data.memoryCell', 'data.trainId'},
            control_src: {'exposureTime.timestamp', 'exposureTime.value', 'highVoltage.value'},
        }
        offset_map = const_data[da]["Offset10Hz"]
        raw_fname_tmp = f'RAW-R{run:04d}-{da}-S{{:05d}}.h5'
        r_dir = in_folder / f'r{run:04d}'

        #r_data = RunDirectory(r_dir)
        r_data = open_run(proposal=proposal, run=run, data='raw')
        r_data.info()
        sel = r_data.select(selection)
        n_files = count_n_files(str(r_dir / raw_fname_tmp), da, starts_with='/RAW')

        ## Offset subtraction & Histogram filling
        ### performs offset subtraction and fills the histogram
        ### looping on individual files because single photon runs can be very large
        for ifile in range(0, n_files):
            file = H5File(r_dir / raw_fname_tmp.format(ifile), inc_suspect_trains=False)
            trains = file.get_array(det_src, 'data.trainId')
            cellid = file.get_array(det_src, 'data.memoryCell', extra_dims=extra_dims[0:1]).astype(np.int16)
            if memory_cells == 1:
                cellid -= sc_start

            # Find the first train that contains all memory cells.
            i_train = 0
            cellid_coords = np.array(cellid[i_train])
            n_cells = len(set(cellid_coords))
            while n_cells != memory_cells:
                i_train += 1
                cellid_coords = np.array(cellid[i_train])
                n_cells = len(set(cellid_coords))

            adc = file.get_array(det_src, 'data.adc', extra_dims=extra_dims).astype(np.float32)
            adc = adc.where(~adc.isnull(), other=-1000.)
            extra_coords = [cellid_coords, np.arange(0, adc.shape[2]), np.arange(0, adc.shape[3])]
            coord_dict = dict(zip(extra_dims, extra_coords))
            adc = adc.assign_coords(coord_dict)

            gain = file.get_array(det_src, 'data.gain', extra_dims=extra_dims).astype(np.int16)
            gain = gain.assign_coords(coord_dict)
            gain = gain.where(gain < 2, other=2).astype(np.int16)

            train_chk = jungfrau_ff.chunk_trains([adc, gain], chunk_size)

            if offset_map is None:
                offset_map = np.zeros((3, memory_cells, 512, 1024), dtype=np.float32)

            if off_sub:
                step_timer.start()
                partial_off = partial(jungfrau_ff.subtract_offset, offset_map)
                with multiprocessing.Pool() as pool:
                    r_maps = pool.map(partial_off, train_chk)
                tid_low = 0
                for r in r_maps:
                    adc_chk = r
                    tid_high = tid_low + adc_chk.shape[0]
                    adc[tid_low:tid_high] = adc_chk
                    tid_low = tid_high
                step_timer.done_step("Offset corrected")

            step_timer.start()
            chunks = jungfrau_ff.chunk_Multi([adc,], block_size)
            with multiprocessing.Pool() as pool:
                partial_fill = partial(jungfrau_ff.fill_histogram, h_bins)
                r_maps = pool.map(partial_fill, chunks)
            for i, r in enumerate(r_maps):
                h_chk, e_chk = r
                if edges[da] is None:
                    edges[da] = np.array(e_chk)
                n_blocks_col = int(h_spectra[da].shape[-1]/block_size[1])
                irow = int(np.floor(i/n_blocks_col)) * block_size[0]
                icol = i%n_blocks_col * block_size[1]
                h_spectra[da][..., irow:irow+block_size[0], icol:icol+block_size[1]] += h_chk
            step_timer.done_step("Histogram created")
```
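%% Cell type:markdown id: tags:
The reassembly above assumes `jungfrau_ff.chunk_Multi` yields blocks in row-major order over a grid of `block_size` tiles. A quick worked check of the index arithmetic under that assumption:
``` python
# Worked check of the block-index math, assuming row-major block order.
import numpy as np

sensor = (512, 1024)
block_size = [256, 64]
n_blocks_col = sensor[1] // block_size[1]  # 16 tiles per tile-row

for i in (0, 1, 15, 16, 31):
    irow = int(np.floor(i / n_blocks_col)) * block_size[0]
    icol = i % n_blocks_col * block_size[1]
    print(f"block {i:2d} -> rows {irow}:{irow + block_size[0]}, cols {icol}:{icol + block_size[1]}")
# block 15 ends the first tile-row (cols 960:1024); block 16 wraps to rows 256:512.
```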
%% Cell type:code id: tags:
``` python
print(f"Total processing time {step_timer.timespan():.01f} s")
step_timer.print_summary()
```
%% Cell type:markdown id: tags:
### Saving histogram
%% Cell type:code id: tags:
``` python
for da in karabo_da:
    fout_h_path = f'{out_folder}/R{runs[0]:04d}_{proposal.upper()}_Gain_Spectra_{da}_Histo.h5'
    hists = h_spectra[da]
    with h5file(fout_h_path, 'w') as fout_h:
        print(f"Saving histograms at {fout_h_path}.")
        dset_h = fout_h.create_dataset("histos", data=hists)
        dset_e = fout_h.create_dataset("edges", data=edges[da])
        dset_noi = fout_h.create_dataset(
            "noise_map",
            data=const_data[da]["Noise10Hz"])

        fout_h.attrs["memory_cells"] = memory_cells
        fout_h.attrs["gain_setting"] = gain_setting
        fout_h.attrs["gain_mode"] = gain_mode
        fout_h.attrs["integration_time"] = integration_time
        fout_h.attrs["bias_voltage"] = bias_voltage
        fout_h.attrs["creation_time"] = str(creation_time)
```
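%% Cell type:markdown id: tags:
To sanity-check a saved file, the datasets written above can be read back with h5py (a usage sketch; dataset and attribute names match the `create_dataset` and `attrs` calls in this cell):
``` python
# Usage sketch: re-open one saved histogram file and inspect a single pixel.
import h5py

fname = f'{out_folder}/R{runs[0]:04d}_{proposal.upper()}_Gain_Spectra_{karabo_da[0]}_Histo.h5'
with h5py.File(fname, 'r') as f:
    histos = f['histos'][()]  # (n_bins, memory_cells, rows, cols)
    _edges = f['edges'][()]   # n_bins + 1 edges
    print(f.attrs['memory_cells'], f.attrs['integration_time'])

centers = (_edges[1:] + _edges[:-1]) / 2.
print(centers.shape, histos[:, 0, 0, 0].sum())  # total counts in pixel (0, 0) of cell 0
```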
%% Cell type:markdown id: tags:
## Fitting histograms
%% Cell type:code id: tags:
``` python
fout_temp = f"R{runs[0]:04d}_{proposal.upper()}_Gain_Spectra_{{}}_{fit_func}_Fit.h5"

for da in karabo_da:
    _edges = np.array(edges[da])
    x = (_edges[1:] + _edges[:-1]) / 2.0  # bin centres
    x, _h_spectra = jungfrau_ff.set_histo_range(x, h_spectra[da], fit_h_range)
    print(f'adjusting histogram range: {x[0]} - {x[-1]}')
    print(f'Histo shape updated from {h_spectra[da].shape} to {_h_spectra.shape}')
    print(f'x-axis: {x.shape}')

    chunks = jungfrau_ff.chunk_Multi([_h_spectra], block_size)
    partial_fit = partial(
        jungfrau_ff.fit_histogram,
        x,
        fit_func,
        n_sigma,
        rebin,
        ratio,
        const_data[da]["Noise10Hz"],
    )
    print("starting spectra fit")
    step_timer.start()
    with multiprocessing.Pool() as pool:
        r_maps = pool.map(partial_fit, chunks)
    step_timer.done_step("r_maps calculation")

    step_timer.start()
    g0_map = np.zeros((memory_cells, sensor_size[0], sensor_size[1]), dtype=np.float32)
    sigma_map = np.zeros((memory_cells, sensor_size[0], sensor_size[1]), dtype=np.float32)
    chi2ndf_map = np.zeros((memory_cells, sensor_size[0], sensor_size[1]), dtype=np.float32)
    alpha_map = np.zeros((memory_cells, sensor_size[0], sensor_size[1]), dtype=np.float32)
    for i, r in enumerate(r_maps):
        g0_chk, sigma_chk, chi2ndf_chk, alpha_chk = r

        n_blocks_col = int(g0_map.shape[-1] / block_size[1])
        irow = int(np.floor(i / n_blocks_col)) * block_size[0]
        icol = i % n_blocks_col * block_size[1]

        g0_map[..., irow : irow + block_size[0], icol : icol + block_size[1]] = g0_chk
        sigma_map[..., irow : irow + block_size[0], icol : icol + block_size[1]] = sigma_chk
        chi2ndf_map[
            ..., irow : irow + block_size[0], icol : icol + block_size[1]
        ] = chi2ndf_chk
        alpha_map[..., irow : irow + block_size[0], icol : icol + block_size[1]] = alpha_chk
    step_timer.done_step("Finished fitting")

    fout_path = f"{out_folder}/{fout_temp.format(da)}"
    step_timer.start()
    with h5file(fout_path, "w") as fout:
        dset_chi2 = fout.create_dataset("chi2map", data=np.transpose(chi2ndf_map))
        dset_gmap_fit = fout.create_dataset("gainMap_fit", data=np.transpose(g0_map))
        dset_std = fout.create_dataset("sigmamap", data=np.transpose(sigma_map))
        dset_alpha = fout.create_dataset("alphamap", data=np.transpose(alpha_map))
        dset_noi = fout.create_dataset(
            "noise_map",
            data=const_data[da]["Noise10Hz"])

        fout.attrs["memory_cells"] = memory_cells
        fout.attrs["integration_time"] = integration_time
        fout.attrs["bias_voltage"] = bias_voltage
        fout.attrs["gain_setting"] = gain_setting
        fout.attrs["gain_mode"] = gain_mode
        fout.attrs["creation_time"] = str(creation_time)
        fout.attrs["karabo_id"] = karabo_id
    step_timer.done_step("Saved fit results")
```
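%% Cell type:markdown id: tags:
A matching sketch for the fit results; note that the maps are saved transposed, so `gainMap_fit` comes back with its axes reversed relative to `(memory_cells, rows, cols)`:
``` python
# Usage sketch: load the fit maps back from the file written above.
import h5py

fit_fname = f"{out_folder}/{fout_temp.format(karabo_da[0])}"
with h5py.File(fit_fname, 'r') as f:
    g0 = f['gainMap_fit'][()]  # transposed: (cols, rows, memory_cells)
    chi2 = f['chi2map'][()]
    print(g0.shape, f.attrs['karabo_id'])
```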