Commit 4ec408f6 authored by Philipp Schmidt

Pad constant if needed when broadcasted to more cells than available

parent ce09e07c
Branch: fix/broadcast-too-small-constants
1 merge request: !112 Pad constant if needed when broadcasted to more cells than available
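
The commit teaches the AGIPD kernel runner to pad calibration constants along the memory-cell axis when a constant provides fewer cells than it is broadcast to, rather than assigning a too-small array into the full-size buffer. As a rough standalone illustration of that padding behaviour (a minimal sketch, assuming NumPy; the pad_cells name and the example shapes are hypothetical, the runner's actual helper is the _pad_constant method added in the diff below):

import numpy as np

def pad_cells(constant, target_cells, value=0):
    # Memory cells are assumed to be on axis 0, as in the runner after
    # transposing. If the constant covers fewer cells than requested,
    # extend it along axis 0 with a fill value (0 for additive constants
    # such as offsets, 1 for multiplicative ones such as relative gain).
    cells = constant.shape[0]
    if cells >= target_cells:
        return constant
    padding = np.full(
        (target_cells - cells,) + constant.shape[1:], value,
        dtype=constant.dtype)
    return np.concatenate([constant, padding], axis=0)

# Example: a 202-cell offset map padded out to 352 memory cells.
offset = np.zeros((202, 512, 128, 3), dtype=np.float32)
assert pad_cells(offset, 352).shape == (352, 512, 128, 3)
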
@@ -371,19 +371,48 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner):
             gain_map, # digitized gain
         )
+    def _pad_constant(self, constant_data, value=0):
+        # This assumes the constant to already be transposed to have
+        # memory cells first!
+        constant_cells = constant_data.shape[0]
+        target_cells = self._constant_memory_cells
+        if constant_cells >= target_cells:
+            return constant_data
+        if value == 0:
+            alloc_array = np.zeros
+        elif value == 1:
+            alloc_array = np.ones
+        else:
+            from functools import partial
+            alloc_array = partial(np.full, fill_value=value)
+        # Padding array to add.
+        padding = alloc_array(
+            (target_cells - constant_cells,) + constant_data.shape[1:],
+            dtype=constant_data.dtype)
+        # np.append flattens the array, so reshape it again.
+        return np.append(constant_data, padding).reshape(
+            (target_cells,) + constant_data.shape[1:])
     def _load_constant(self, constant, constant_data):
         if constant is Constants.ThresholdsDark:
             # shape: y, x, memory cell, thresholds and gain values
             # note: the gain values are something like means used to derive thresholds
-            self.gain_thresholds[:] = self._xp.asarray(
+            self.gain_thresholds[:] = self._pad_constant(self._xp.asarray(
                 constant_data[..., :2], dtype=np.float32
-            ).transpose((2, 1, 0, 3))[:self._constant_memory_cells]
+            ).transpose((2, 1, 0, 3)))[:self._constant_memory_cells]
         elif constant is Constants.Offset:
             # shape: y, x, memory cell, gain stage
-            self.offset_map[:] = self._xp.asarray(
+            self.offset_map[:] = self._pad_constant(self._xp.asarray(
                 constant_data, dtype=np.float32
-            ).transpose((2, 1, 0, 3))[:self._constant_memory_cells]
+            ).transpose((2, 1, 0, 3)))[:self._constant_memory_cells]
         elif constant is Constants.SlopesCS:
+            # Always has 352 cells, so needs no fix.
             if self._get("corrections.relGain.sourceOfSlopes") == "PC":
                 return
             self.rel_gain_map.fill(1)
@@ -416,7 +445,7 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner):
             rel_gain_map = np.ones(
                 (
                     3,
-                    self._constant_memory_cells,
+                    frac_hg_mg.shape[0],  # Fixed this allocation
                     self.num_pixels_fs,
                     self.num_pixels_ss,
                 ),
@@ -424,23 +453,26 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner):
             )
             rel_gain_map[1] = rel_gain_map[0] * frac_hg_mg
             rel_gain_map[2] = rel_gain_map[1] * 4.48
-            self.rel_gain_map[:] = self._xp.asarray(
+            self.rel_gain_map[:] = self._pad_constant(self._xp.asarray(
                 rel_gain_map.transpose((1, 3, 2, 0)), dtype=np.float32
-            )[:self._constant_memory_cells]
+            ), value=1)[:self._constant_memory_cells]
             if self._get("corrections.relGain.adjustMgBaseline"):
                 if self._get("corrections.relGain.overrideMdAdditionalOffset"):
                     self.md_additional_offset.fill(
                         self._get("corrections.relGain.mdAdditionalOffset")
                     )
                 else:
-                    self.md_additional_offset[:] = self._xp.asarray(
+                    self.md_additional_offset[:] = self._pad_constant(self._xp.asarray(
                         (
                             hg_intercept - mg_intercept * frac_hg_mg
                         ).astype(np.float32).transpose((0, 2, 1)), dtype=np.float32
-                    )[:self._constant_memory_cells]
+                    ), value=1)[:self._constant_memory_cells]
             else:
                 self.md_additional_offset.fill(0)
         elif constant is Constants.SlopesFF:
+            # Always has 352 cells, so allocating the right size in
+            # rel_gain_map fixes the shape problems here.
             # constant shape: y, x, memory cell
             if constant_data.shape[2] == 2:
                 # TODO: remove support for old format
@@ -499,7 +531,7 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner):
             # gain mapped constants seem consistent
             constant_data = constant_data.transpose((2, 1, 0, 3))
             constant_data &= self._xp.uint32(self.bad_pixel_subset)
-            self.bad_pixel_map |= constant_data[:self._constant_memory_cells]
+            self.bad_pixel_map |= self._pad_constant(constant_data)[:self._constant_memory_cells]

     def flush_buffers(self, constants):
         if Constants.Offset in constants: