Skip to content
Snippets Groups Projects

Pad constant if needed when broadcasted to more cells than available

Merged Philipp Schmidt requested to merge fix/broadcast-too-small-constants into master
3 unresolved threads
1 file
+ 42
10
Compare changes
  • Side-by-side
  • Inline
@@ -371,19 +371,48 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner):
gain_map, # digitized gain
)
def _pad_constant(self, constant_data, value=0):
# This assumes the constant to already be transported to have
# memory cells first!
constant_cells = constant_data.shape[0]
target_cells = self._constant_memory_cells
if constant_cells >= target_cells:
return constant_data
if value == 0:
alloc_array = np.zeros
elif value == 1:
alloc_array = np.ones
else:
from functools import partial
alloc_array = partial(np.full, fill_value=value)
# Padding array to add.
padding = alloc_array(
(target_cells - constant_cells,) + constant_data.shape[1:],
dtype=constant_data.dtype)
# np.append flattens the array, so reshape it again.
return np.append(constant_data, padding).reshape(
(target_cells,) + constant_data.shape[1:])
def _load_constant(self, constant, constant_data):
if constant is Constants.ThresholdsDark:
# shape: y, x, memory cell, thresholds and gain values
# note: the gain values are something like means used to derive thresholds
self.gain_thresholds[:] = self._xp.asarray(
self.gain_thresholds[:] = self._pad_constant(self._xp.asarray(
constant_data[..., :2], dtype=np.float32
).transpose((2, 1, 0, 3))[:self._constant_memory_cells]
).transpose((2, 1, 0, 3)))[:self._constant_memory_cells]
elif constant is Constants.Offset:
# shape: y, x, memory cell, gain stage
self.offset_map[:] = self._xp.asarray(
self.offset_map[:] = self._pad_constant(self._xp.asarray(
constant_data, dtype=np.float32
).transpose((2, 1, 0, 3))[:self._constant_memory_cells]
).transpose((2, 1, 0, 3)))[:self._constant_memory_cells]
elif constant is Constants.SlopesCS:
# Always has 352 cells, so needs no fix.
if self._get("corrections.relGain.sourceOfSlopes") == "PC":
return
self.rel_gain_map.fill(1)
@@ -416,7 +445,7 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner):
rel_gain_map = np.ones(
(
3,
self._constant_memory_cells,
frac_hg_mg.shape[0], # Fixed this allocation
self.num_pixels_fs,
self.num_pixels_ss,
),
@@ -424,23 +453,26 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner):
)
rel_gain_map[1] = rel_gain_map[0] * frac_hg_mg
rel_gain_map[2] = rel_gain_map[1] * 4.48
    • Unrelated to the MR.

      Based on our offline discussion about the better performance of zeros vs. ones: we can initialize rel_gain_map with np.empty here and fill it later

      rel_gain_map[0] = 1.0
      rel_gain_map[1] = frac_hg_mg
      rel_gain_map[2] = frac_hg_mg * 4.48

      This should be faster.

      Edited by Karim Ahmed
      • This part is in offline, maybe I should modify it there for better readability as well

      • Fair point. But given this is fast enough and likely a workaround pending a more permanent solution, let's hold off on this for now.

        @hammerd should investigate whether we can decouple this broadcasting from the memoryCells parameter and fold it into corrections, which should have to deal with this as well.

      • Please register or sign in to reply
Please register or sign in to reply
self.rel_gain_map[:] = self._xp.asarray(
self.rel_gain_map[:] = self._pad_constant(self._xp.asarray(
rel_gain_map.transpose((1, 3, 2, 0)), dtype=np.float32
)[:self._constant_memory_cells]
), value=1)[:self._constant_memory_cells]
if self._get("corrections.relGain.adjustMgBaseline"):
if self._get("corrections.relGain.overrideMdAdditionalOffset"):
self.md_additional_offset.fill(
self._get("corrections.relGain.mdAdditionalOffset")
)
else:
self.md_additional_offset[:] = self._xp.asarray(
self.md_additional_offset[:] = self._pad_constant(self._xp.asarray(
(
hg_intercept - mg_intercept * frac_hg_mg
).astype(np.float32).transpose((0, 2, 1)), dtype=np.float32
)[:self._constant_memory_cells]
), value=1)[:self._constant_memory_cells]
else:
self.md_additional_offset.fill(0)
elif constant is Constants.SlopesFF:
# Always has 352 cells, so allocating the right size in
# rel_gain_map fixes the shape problems here.
# constant shape: y, x, memory cell
if constant_data.shape[2] == 2:
# TODO: remove support for old format
@@ -499,7 +531,7 @@ class AgipdBaseRunner(base_kernel_runner.BaseKernelRunner):
# gain mapped constants seem consistent
constant_data = constant_data.transpose((2, 1, 0, 3))
constant_data &= self._xp.uint32(self.bad_pixel_subset)
self.bad_pixel_map |= constant_data[:self._constant_memory_cells]
self.bad_pixel_map |= self._pad_constant(constant_data)[:self._constant_memory_cells]
def flush_buffers(self, constants):
if Constants.Offset in constants:
Loading