Commit 15270c76, authored 5 years ago by Karim Ahmed

    rebase onto master

Parents: 747d3de2, 58897032
Part of 1 merge request: !200 Feat/only offset

Showing 1 changed file with 70 additions and 25 deletions:
notebooks/DSSC/DSSC_Correct_and_Verify.ipynb (+70, −25)
@@ -22,9 +22,9 @@
     },
     "outputs": [],
     "source": [
-    "in_folder = \"/gpfs/exfel/exp/SCS/201901/p002212/raw/\" # the folder to read data from, required\n",
-    "run = 29 # runs to process, required\n",
-    "out_folder = \"/gpfs/exfel/data/scratch/xcal/test/\" # the folder to output to, required\n",
+    "in_folder = \"/gpfs/exfel/exp/SCS/201931/p900095/raw/\" # the folder to read data from, required\n",
+    "run = 1515 # runs to process, required\n",
+    "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/\" # the folder to output to, required\n",
     "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n",
     "overwrite = True # set to True if existing data should be overwritten\n",
...
@@ -336,6 +336,10 @@
     "    from cal_tools.tools import get_dir_creation_date, get_constant_from_db_and_time, get_random_db_interface\n",
     "    from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n",
     "    \n",
+    "    from hashlib import blake2b\n",
+    "    import struct\n",
+    "    import binascii\n",
+    "    \n",
     "    filename, filename_out, channel, qm = inp\n",
     "    h5path = \"INSTRUMENT/{}/DET/{}CH0:xtdf/\".format(loc, channel)\n",
     "    h5path_idx = \"INDEX/{}/DET/{}CH0:xtdf/\".format(loc, channel)\n",
...
@@ -345,7 +349,8 @@
     "    high_edges = None\n",
     "    hists_signal_high = None\n",
     "    pulse_edges = None\n",
-    "    \n",
+    "    err = None\n",
+    "    offset_not_found = False\n",
     "    def get_num_cells(fname, loc, module):\n",
     "        with h5py.File(fname, \"r\") as f:\n",
     "\n",
...
@@ -355,18 +360,35 @@
     "        dists = np.array([(o-maxcell) for o in options])\n",
     "        dists[dists<0] = 10000 # assure to always go higher\n",
     "        return options[np.argmin(dists)]\n",
     "    \n",
+    "    def get_checksum(fname, loc, module):\n",
+    "        with h5py.File(fname, \"r\") as infile:\n",
+    "            count = np.squeeze(infile[\"/INDEX/{}/DET/{}CH0:xtdf/image/count\".format(loc, channel)])\n",
+    "            first = np.squeeze(infile[\"/INDEX/{}/DET/{}CH0:xtdf/image/first\".format(loc, channel)])\n",
+    "            last_index = int(first[count != 0][-1]+count[count != 0][-1])\n",
+    "            first_index = int(first[count != 0][0])\n",
+    "            pulseids = infile[\"INSTRUMENT/{}/DET/{}CH0:xtdf/image/pulseId\".format(loc, channel)][first_index:int(first[count != 0][1])]\n",
+    "            bveto = blake2b(pulseids.data, digest_size=8)\n",
+    "            pulseid_checksum = struct.unpack('d', binascii.unhexlify(bveto.hexdigest()))[0]\n",
+    "            return pulseid_checksum\n",
+    "    \n",
     "\n",
     "    if mem_cells == 0:\n",
     "        mem_cells = get_num_cells(filename, loc, channel)\n",
     "    \n",
+    "    pulseid_checksum = get_checksum(filename, loc, channel)\n",
+    "    \n",
     "    print(\"Memcells: {}\".format(mem_cells))\n",
     "    \n",
-    "    condition = Conditions.Dark.DSSC(bias_voltage=bias_voltage, memory_cells=mem_cells)\n",
+    "    condition = Conditions.Dark.DSSC(bias_voltage=bias_voltage, memory_cells=mem_cells,\n",
+    "                                     pulseid_checksum=pulseid_checksum)\n",
     "    \n",
+    "    \n",
     "    detinst = getattr(Detectors, dinstance)\n",
     "    device = getattr(detinst, qm)\n",
+    "    with h5py.File(filename, \"r\", driver=\"core\") as infile:\n",
+    "        y = infile[h5path+\"image/data\"].shape[2]\n",
+    "        x = infile[h5path+\"image/data\"].shape[3]\n",
     "    offset, when = get_constant_from_db_and_time(device,\n",
     "                                        Constants.DSSC.Offset(),\n",
     "                                        condition,\n",
...
@@ -375,9 +397,12 @@
     "                                        creation_time=creation_time,\n",
     "                                        timeout=cal_db_timeout)\n",
     "    if offset is not None:\n",
-    "    \n",
     "        offset = np.moveaxis(np.moveaxis(offset[...], 2, 0), 2, 1)\n",
     "        print(offset.shape)\n",
+    "    else:\n",
+    "        offset_not_found = True\n",
+    "        offset = np.zeros((x, y, mem_cells))\n",
+    "        offset = np.moveaxis(np.moveaxis(offset[...], 2, 0), 2, 1)\n",
+    "        print(\"No offset found in the database\")\n",
     "    \n",
     "    \n",
     "    def copy_and_sanitize_non_cal_data(infile, outfile):\n",
...
@@ -415,7 +440,7 @@
     "                                               oshape, chunks=chunks,\n",
     "                                               dtype=np.float32,\n",
     "                                               fletcher32=True)\n",
-    "            \n",
+    "\n",
     "            mdset = outfile.create_dataset(h5path + \"image/mask\",\n",
     "                                               oshape, chunks=chunks,\n",
     "                                               dtype=np.uint32,\n",
...
@@ -423,24 +448,25 @@
     "                                               compression_opts=1,\n",
     "                                               shuffle=True,\n",
     "                                               fletcher32=True)\n",
-    "            \n",
+    "\n",
     "            for train in range(first_arr.size):\n",
     "                first = first_arr[train]\n",
     "                last = last_arr[train]\n",
     "                data = np.squeeze(infile[h5path+\"image/data\"][first:last, ...].astype(np.float32))\n",
     "                cellId = np.squeeze(infile[h5path+\"image/cellId\"][first:last, ...])\n",
     "                pulseId = np.squeeze(infile[h5path+\"image/pulseId\"][first:last, ...])\n",
-    "                if offset_image != \"PP\" or offset is None:\n",
+    "\n",
+    "                if offset_image != \"PP\" and offset_not_found:\n",
     "                    data -= data[offset_image, ...]\n",
     "                else:\n",
-    "                    data -= offset[cellId,...]\n",
-    "            \n",
+    "                    data[...] -= offset[cellId,...]\n",
+    "\n",
     "                if train == 0:\n",
     "                    pulseId = np.repeat(pulseId[:, None], data.shape[1], axis=1)\n",
     "                    pulseId = np.repeat(pulseId[:,:,None], data.shape[2], axis=2)\n",
     "                    bins = (55, pulseId.max())\n",
     "                    rnge = [[-5, 50], [0, pulseId.max()]]\n",
-    "            \n",
+    "\n",
     "                    hists_signal_low, low_edges, pulse_edges = np.histogram2d(data.flatten(),\n",
     "                                                                              pulseId.flatten(),\n",
     "                                                                              bins=bins,\n",
...
@@ -452,13 +478,12 @@
     "                                                                      range=rnge) \n",
     "                # data[data < 0] = 0\n",
     "                ddset[first:last, ...] = data\n",
     "            \n",
     "            # find static and noisy values in dark images\n",
     "            data = infile[h5path+\"image/data\"][last, ...].astype(np.float32)\n",
     "            bpix = np.zeros(oshape[1:], np.uint32)\n",
     "            dark_std = np.std(data, axis=0)\n",
     "            bpix[dark_std > noisy_pix_threshold] = BadPixels.NOISE_OUT_OF_THRESHOLD.value\n",
-    "            \n",
     "\n",
     "            for i in range(8):\n",
     "                for j in range(2):\n",
...
@@ -466,25 +491,29 @@
     "                    if mask_noisy_asic:\n",
     "                        if count_noise/(64*64) > mask_noisy_asic:\n",
     "                            bpix[i*64:(i+1)*64, j*64:(j+1)*64] = BadPixels.NOISY_ADC.value\n",
     "\n",
     "                    if mask_cold_asic:\n",
     "                        count_cold = np.count_nonzero(asic_std < 0.5)\n",
     "                        if count_cold/(64*64) > mask_cold_asic:\n",
     "                            bpix[i*64:(i+1)*64, j*64:(j+1)*64] = BadPixels.ASIC_STD_BELOW_NOISE.value\n",
     "    \n",
-    "            for train in range(first_arr.size):\n",
-    "                first = first_arr[train]\n",
-    "                last = last_arr[train]\n",
-    "                mdset[first:last, ...] = np.repeat(bpix[None,...], last-first, axis=0)\n",
     "\n",
+    "# This was removed because last element in last is -1 and that produce errors\n",
+    "# Also because mdset is not clear why is it needed.\n",
+    "    \n",
+    "#             for train in range(first_arr.size):\n",
+    "#                 first = first_arr[train]\n",
+    "#                 last = last_arr[train]\n",
+    "#                 mdset[first:last, ...] = np.repeat(bpix[None,...], last-first, axis=0)\n",
     "    except Exception as e:\n",
     "        print(e)\n",
     "        success = False\n",
     "        reason = \"Error\"\n",
+    "        err = e\n",
     "    \n",
+    "    if err is None and offset_not_found:\n",
+    "        err = \"Offset was not found in the database!\"\n",
+    "    \n",
-    "    return (hists_signal_low, hists_signal_high, low_edges, high_edges, pulse_edges, when, qm)\n",
+    "    return (hists_signal_low, hists_signal_high, low_edges, high_edges, pulse_edges, when, qm, err)\n",
     "    \n",
     "done = False\n",
     "first_files = []\n",
...
@@ -498,6 +527,7 @@
     "\n",
     "whens = []\n",
     "qms = []\n",
+    "Errors = []\n",
     "while not done:\n",
     "    \n",
     "    dones = []\n",
...
@@ -530,9 +560,10 @@
     "    \n",
     "    for rr in r:\n",
     "        if rr is not None:\n",
-    "            hl, hh, low_edges, high_edges, pulse_edges, when, qm = rr \n",
+    "            hl, hh, low_edges, high_edges, pulse_edges, when, qm, err = rr \n",
     "            whens.append(when)\n",
     "            qms.append(qm)\n",
+    "            Errors.append(err)\n",
     "            if hl is not None: # any one being None will also make the others None\n",
     "                hists_signal_low += hl.astype(np.float64)\n",
     "                hists_signal_high += hh.astype(np.float64) \n",
...
@@ -542,7 +573,14 @@
     "whens = [x for _,x in sorted(zip(qms,whens))]\n",
     "qms = sorted(qms)\n",
     "for i, qm in enumerate(qms):\n",
-    "    print(\"Offset for {} was injected on {}\".format(qm, whens[i].isoformat()))"
+    "    try:\n",
+    "        when = whens[i].isoformat()\n",
+    "    except:\n",
+    "        when = whens[i]\n",
+    "    if Errors[i] is not None:\n",
+    "        print(\"Offset for {} was injected on {}, Error: {}\".format(qm, when, Errors[i]))\n",
+    "    else:\n",
+    "        print(\"Offset for {} was injected on {}\".format(qm, when))"
    ]
   },
   {
...
@@ -995,6 +1033,13 @@
    "outputs": [],
    "source": []
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
   {
    "cell_type": "code",
    "execution_count": null,
...
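The core of this change is the new `pulseid_checksum` condition on the dark offset: the pulse-ID pattern of the first train is hashed and folded into the calibration-database lookup, and a missing offset is now reported through the new `err` return value instead of failing silently. A minimal, self-contained sketch of the checksum itself (standard library only; the `pulse_ids` array here is invented for illustration):

``` python
# Sketch of the pulse-ID checksum introduced by this commit: hash the
# pulse-ID pattern of the first train to 8 bytes with blake2b, then
# reinterpret those bytes as a float so the value fits an existing
# numeric condition field. Darks taken with a different pulse pattern
# then no longer match the lookup.
import struct
import binascii
from hashlib import blake2b

import numpy as np

pulse_ids = np.arange(0, 400, dtype=np.uint64)  # hypothetical pulse pattern
bveto = blake2b(pulse_ids.data, digest_size=8)
checksum = struct.unpack('d', binascii.unhexlify(bveto.hexdigest()))[0]
print(checksum)  # a deterministic float fingerprint of the pattern
```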
%% Cell type:markdown id: tags:
# DSSC Offline Correction #
Author: European XFEL Detector Group, Version: 1.0
Offline Calibration for the DSSC Detector
%% Cell type:code id: tags:
``` python
in_folder = "/gpfs/exfel/exp/SCS/201931/p900095/raw/" # the folder to read data from, required
run = 1515 # runs to process, required
out_folder = "/gpfs/exfel/data/scratch/ahmedk/test/" # the folder to output to, required
sequences = [-1] # sequences to correct, set to -1 for all, range allowed
mem_cells = 0 # number of memory cells used, set to 0 to automatically infer
overwrite = True # set to True if existing data should be overwritten
cluster_profile = "noDB" # cluster profile to use
max_pulses = 500 # maximum number of pulses per train
bias_voltage = 100 # detector bias voltage
cal_db_interface = "tcp://max-exfl016:8020#8025" # the database interface to use
use_dir_creation_date = True # use the creation date of the input dir for database queries
sequences_per_node = 1 # number of sequence files per cluster node if run as SLURM job, set to 0 to not run SLURM parallel
cal_db_timeout = 300000 # in milliseconds
chunk_size_idim = 1 # chunking size of imaging dimension, adjust if user software is sensitive to this
instrument = "SCS" # the instrument the detector is installed at, required
mask_noisy_asic = 0.25 # set to a value other than 0 and below 1 to mask an entire ADC if the fraction of noisy pixels is above it
offset_image = "PP" # last one
mask_cold_asic = 0.25 # mask cold ASICs if the fraction of pixels with negligible standard deviation is larger than this
noisy_pix_threshold = 1. # threshold above which a pixel is considered noisy
geo_file = "/gpfs/exfel/data/scratch/xcal/dssc_geo_june19.h5" # detector geometry file
dinstance = "DSSC1M1"

def balance_sequences(in_folder, run, sequences, sequences_per_node):
    import glob
    import re
    import numpy as np
    if sequences[0] == -1:
        sequence_files = glob.glob("{}/r{:04d}/*-S*.h5".format(in_folder, run))
        seq_nums = set()
        for sf in sequence_files:
            seqnum = re.findall(r".*-S([0-9]*).h5", sf)[0]
            seq_nums.add(int(seqnum))
        seq_nums -= set(sequences)
    else:
        seq_nums = set(sequences)
    nsplits = len(seq_nums) // sequences_per_node + 1
    while nsplits > 32:
        sequences_per_node += 1
        nsplits = len(seq_nums) // sequences_per_node + 1
        print("Changed to {} sequences per node to have a maximum of 8 concurrent jobs".format(sequences_per_node))
    return [l.tolist() for l in np.array_split(list(seq_nums), nsplits) if l.size > 0]
```
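For orientation, a hedged example of what `balance_sequences` returns when explicit sequence numbers are given (hypothetical values; with `sequences = [-1]` it would instead glob the run directory):

``` python
# Hypothetical call, not part of the notebook: four explicit sequences,
# two per node. nsplits = 4 // 2 + 1 = 3, so numpy splits the set into
# three chunks of per-node sequence numbers.
chunks = balance_sequences(in_folder, run, [0, 1, 2, 3], sequences_per_node=2)
print(chunks)  # [[0, 1], [2], [3]]
```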
%% Cell type:code id: tags:
``` python
import sys
from collections import OrderedDict

# make sure a cluster is running with ipcluster start --n=32, give it a while to start
import os
import h5py
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from ipyparallel import Client

print("Connecting to profile {}".format(cluster_profile))
view = Client(profile=cluster_profile)[:]
view.use_dill()

from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions
from cal_tools.tools import (gain_map_files, parse_runs, run_prop_seq_from_path, get_notebook_name,
                             get_dir_creation_date, get_constant_from_db)

from dateutil import parser
from datetime import timedelta

creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run)
    print("Using {} as creation time".format(creation_time))

in_folder = "{}/r{:04d}".format(in_folder, run)

if sequences[0] == -1:
    sequences = None

QUADRANTS = 4
MODULES_PER_QUAD = 4
DET_FILE_INSET = "DSSC"
CHUNK_SIZE = 512
MAX_PAR = 32

if in_folder[-1] == "/":
    in_folder = in_folder[:-1]
out_folder = "{}/{}".format(out_folder, os.path.split(in_folder)[-1])
print("Outputting to {}".format(out_folder))

if not os.path.exists(out_folder):
    os.makedirs(out_folder)
elif not overwrite:
    raise AttributeError("Output path exists! Exiting")

import warnings
warnings.filterwarnings('ignore')

loc = None
if instrument == "SCS":
    loc = "SCS_DET_DSSC1M-1"
    dinstance = "DSSC1M1"
print("Detector in use is {}".format(loc))

if offset_image.upper() != "PP":
    offset_image = int(offset_image)
```
%% Cell type:code id: tags:
``` python
def combine_stack(d, sdim):
    combined = np.zeros((sdim, 2048, 2048))
    combined[...] = np.nan
    d = np.moveaxis(d, 2, 3)
    dy = 0
    quad_pos = [
        (0, 145),
        (-5, 25),
        (130, 15),
        (130, 140),
    ]
    px = 0.236
    py = 0.204
    with h5py.File(geo_file, "r") as gf:
        for i in range(16):
            t1 = gf["Q{}M{}"]
            try:
                if i < 8:
                    dx = -512
                    if i > 3:
                        dx -= 25
                    mx = 1
                    my = i % 8
                    combined[:, my*128+dy:(my+1)*128+dy,
                             mx*512-dx:(mx+1)*512-dx] = np.rollaxis(d[i], 2, 1)[:, :, ::-1]
                    dy += 30
                    if i == 3:
                        dy += 30
                elif i < 12:
                    dx = 80 - 50
                    if i == 8:
                        dy = 4*30 + 30 + 50 - 30
                    mx = 1
                    my = i % 8 + 4
                    combined[:, my*128+dy:(my+1)*128+dy,
                             mx*512-dx:(mx+1)*512-dx] = np.rollaxis(d[i], 2, 1)[:, ::-1, :]
                    dy += 30
                else:
                    dx = 100 - 50
                    if i == 11:
                        dy = 20
                    mx = 1
                    my = i - 14
                    combined[:, my*128+dy:(my+1)*128+dy,
                             mx*512-dx:(mx+1)*512-dx] = np.rollaxis(d[i], 2, 1)[:, ::-1, :]
                    dy += 30
            except:
                continue
    return combined
```
%% Cell type:code id: tags:
``` python
# set everything up filewise
from queue import Queue
from collections import OrderedDict

def map_modules_from_files(filelist):
    module_files = OrderedDict()
    mod_ids = OrderedDict()
    total_sequences = 0
    sequences_qm = {}
    one_module = None
    for quadrant in range(0, QUADRANTS):
        for module in range(0, MODULES_PER_QUAD):
            name = "Q{}M{}".format(quadrant + 1, module + 1)
            module_files[name] = Queue()
            num = quadrant * 4 + module
            mod_ids[name] = num
            file_infix = "{}{:02d}".format(DET_FILE_INSET, num)
            sequences_qm[name] = 0
            for file in filelist:
                if file_infix in file:
                    if not one_module:
                        one_module = file, num
                    module_files[name].put(file)
                    total_sequences += 1
                    sequences_qm[name] += 1
    return module_files, mod_ids, total_sequences, sequences_qm, one_module

dirlist = sorted(os.listdir(in_folder))
file_list = []
for entry in dirlist:
    # only h5 files
    abs_entry = "{}/{}".format(in_folder, entry)
    if os.path.isfile(abs_entry) and os.path.splitext(abs_entry)[1] == ".h5":
        if sequences is None:
            file_list.append(abs_entry)
        else:
            for seq in sequences:
                if "{:05d}.h5".format(seq) in abs_entry:
                    file_list.append(os.path.abspath(abs_entry))

mapped_files, mod_ids, total_sequences, sequences_qm, one_module = map_modules_from_files(file_list)
MAX_PAR = min(MAX_PAR, total_sequences)
```
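As a quick illustration of the mapping this cell builds (file names below are invented; real raw files follow a `RAW-R{run}-DSSC{module}-S{sequence}.h5` pattern):

``` python
# Hypothetical illustration, not part of the notebook: two module files,
# one for Q1M1 (infix DSSC00) and one for Q2M2 (infix DSSC05).
demo = ["RAW-R1515-DSSC00-S00000.h5", "RAW-R1515-DSSC05-S00000.h5"]
mods, ids, total, per_qm, one = map_modules_from_files(demo)
print(ids["Q1M1"], per_qm["Q1M1"], total)  # 0 1 2
print(one)  # ('RAW-R1515-DSSC00-S00000.h5', 0)
```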
%% Cell type:markdown id: tags:
## Processed Files ##
%% Cell type:code id: tags:
``` python
import copy
from IPython.display import HTML, display, Markdown, Latex
import tabulate

print("Processing a total of {} sequence files in chunks of {}".format(total_sequences, MAX_PAR))
table = []
mfc = copy.copy(mapped_files)
ti = 0
for k, files in mfc.items():
    i = 0
    while not files.empty():
        f = files.get()
        if i == 0:
            table.append((ti, k, i, f))
        else:
            table.append((ti, "", i, f))
        i += 1
        ti += 1
if len(table):
    md = display(Latex(tabulate.tabulate(table, tablefmt='latex',
                                         headers=["#", "module", "# module", "file"])))
# restore the queue
mapped_files, mod_ids, total_sequences, sequences_qm, one_module = map_modules_from_files(file_list)
```
%% Cell type:code id: tags:
``` python
import copy
from functools import partial

def correct_module(total_sequences, sequences_qm, loc, dinstance, offset_image,
                   mask_noisy_asic, mask_cold_asic, noisy_pix_threshold, chunksize,
                   mem_cells, bias_voltage, cal_db_timeout, creation_time, cal_db_interface, inp):
    import numpy as np
    import copy
    import h5py
    from cal_tools.enums import BadPixels
    from cal_tools.tools import get_dir_creation_date, get_constant_from_db_and_time, get_random_db_interface
    from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions

    from hashlib import blake2b
    import struct
    import binascii

    filename, filename_out, channel, qm = inp
    h5path = "INSTRUMENT/{}/DET/{}CH0:xtdf/".format(loc, channel)
    h5path_idx = "INDEX/{}/DET/{}CH0:xtdf/".format(loc, channel)

    low_edges = None
    hists_signal_low = None
    high_edges = None
    hists_signal_high = None
    pulse_edges = None

    err = None
    offset_not_found = False

    def get_num_cells(fname, loc, module):
        with h5py.File(fname, "r") as f:
            cells = f["INSTRUMENT/{}/DET/{}CH0:xtdf/image/cellId".format(loc, module)][()]
            maxcell = np.max(cells)
            options = [100, 200, 400, 500, 600, 700, 800]
            dists = np.array([(o - maxcell) for o in options])
            dists[dists < 0] = 10000  # assure to always go higher
            return options[np.argmin(dists)]

    def get_checksum(fname, loc, module):
        with h5py.File(fname, "r") as infile:
            count = np.squeeze(infile["/INDEX/{}/DET/{}CH0:xtdf/image/count".format(loc, channel)])
            first = np.squeeze(infile["/INDEX/{}/DET/{}CH0:xtdf/image/first".format(loc, channel)])
            last_index = int(first[count != 0][-1] + count[count != 0][-1])
            first_index = int(first[count != 0][0])
            pulseids = infile["INSTRUMENT/{}/DET/{}CH0:xtdf/image/pulseId".format(loc, channel)][first_index:int(first[count != 0][1])]
            bveto = blake2b(pulseids.data, digest_size=8)
            pulseid_checksum = struct.unpack('d', binascii.unhexlify(bveto.hexdigest()))[0]
            return pulseid_checksum

    if mem_cells == 0:
        mem_cells = get_num_cells(filename, loc, channel)

    pulseid_checksum = get_checksum(filename, loc, channel)

    print("Memcells: {}".format(mem_cells))

    condition = Conditions.Dark.DSSC(bias_voltage=bias_voltage, memory_cells=mem_cells,
                                     pulseid_checksum=pulseid_checksum)

    detinst = getattr(Detectors, dinstance)
    device = getattr(detinst, qm)
    with h5py.File(filename, "r", driver="core") as infile:
        y = infile[h5path + "image/data"].shape[2]
        x = infile[h5path + "image/data"].shape[3]
    offset, when = get_constant_from_db_and_time(device,
                                                 Constants.DSSC.Offset(),
                                                 condition,
                                                 None,
                                                 get_random_db_interface(cal_db_interface),
                                                 creation_time=creation_time,
                                                 timeout=cal_db_timeout)
    if offset is not None:
        offset = np.moveaxis(np.moveaxis(offset[...], 2, 0), 2, 1)
        print(offset.shape)
    else:
        offset_not_found = True
        offset = np.zeros((x, y, mem_cells))
        offset = np.moveaxis(np.moveaxis(offset[...], 2, 0), 2, 1)
        print("No offset found in the database")

    def copy_and_sanitize_non_cal_data(infile, outfile):
        # these are touched in the correct function, do not copy them here
        dont_copy = ["data"]
        dont_copy = [h5path + "image/{}".format(do) for do in dont_copy]

        # a visitor to copy everything else
        def visitor(k, item):
            if k not in dont_copy:
                if isinstance(item, h5py.Group):
                    outfile.create_group(k)
                elif isinstance(item, h5py.Dataset):
                    group = str(k).split("/")
                    group = "/".join(group[:-1])
                    infile.copy(k, outfile[group])

        infile.visititems(visitor)

    try:
        with h5py.File(filename, "r", driver="core") as infile:
            with h5py.File(filename_out, "w") as outfile:
                copy_and_sanitize_non_cal_data(infile, outfile)
                # get indices of last images in each train
                first_arr = np.squeeze(infile[h5path_idx + "image/first"]).astype(np.int)
                last_arr = np.concatenate((first_arr[1:], np.array([-1,]))).astype(np.int)
                assert first_arr.size == last_arr.size
                oshape = list(infile[h5path + "image/data"].shape)
                if len(oshape) == 4:
                    oshape = [oshape[0],] + oshape[2:]
                chunks = (chunksize, oshape[1], oshape[2])
                ddset = outfile.create_dataset(h5path + "image/data",
                                               oshape, chunks=chunks,
                                               dtype=np.float32,
                                               fletcher32=True)

                mdset = outfile.create_dataset(h5path + "image/mask",
                                               oshape, chunks=chunks,
                                               dtype=np.uint32,
                                               compression="gzip",
                                               compression_opts=1,
                                               shuffle=True,
                                               fletcher32=True)

                for train in range(first_arr.size):
                    first = first_arr[train]
                    last = last_arr[train]
                    data = np.squeeze(infile[h5path + "image/data"][first:last, ...].astype(np.float32))
                    cellId = np.squeeze(infile[h5path + "image/cellId"][first:last, ...])
                    pulseId = np.squeeze(infile[h5path + "image/pulseId"][first:last, ...])

                    if offset_image != "PP" and offset_not_found:
                        data -= data[offset_image, ...]
                    else:
                        data[...] -= offset[cellId, ...]

                    if train == 0:
                        pulseId = np.repeat(pulseId[:, None], data.shape[1], axis=1)
                        pulseId = np.repeat(pulseId[:, :, None], data.shape[2], axis=2)
                        bins = (55, pulseId.max())
                        rnge = [[-5, 50], [0, pulseId.max()]]

                        hists_signal_low, low_edges, pulse_edges = np.histogram2d(data.flatten(),
                                                                                  pulseId.flatten(),
                                                                                  bins=bins,
                                                                                  range=rnge)
                        rnge = [[-5, 300], [0, pulseId.max()]]
                        hists_signal_high, high_edges, _ = np.histogram2d(data.flatten(),
                                                                          pulseId.flatten(),
                                                                          bins=bins,
                                                                          range=rnge)
                    # data[data < 0] = 0
                    ddset[first:last, ...] = data

                # find static and noisy values in dark images
                data = infile[h5path + "image/data"][last, ...].astype(np.float32)
                bpix = np.zeros(oshape[1:], np.uint32)
                dark_std = np.std(data, axis=0)
                bpix[dark_std > noisy_pix_threshold] = BadPixels.NOISE_OUT_OF_THRESHOLD.value

                for i in range(8):
                    for j in range(2):
                        count_noise = np.count_nonzero(bpix[i*64:(i+1)*64, j*64:(j+1)*64])
                        asic_std = np.std(data[:, i*64:(i+1)*64, j*64:(j+1)*64])
                        if mask_noisy_asic:
                            if count_noise / (64*64) > mask_noisy_asic:
                                bpix[i*64:(i+1)*64, j*64:(j+1)*64] = BadPixels.NOISY_ADC.value

                        if mask_cold_asic:
                            count_cold = np.count_nonzero(asic_std < 0.5)
                            if count_cold / (64*64) > mask_cold_asic:
                                bpix[i*64:(i+1)*64, j*64:(j+1)*64] = BadPixels.ASIC_STD_BELOW_NOISE.value

# This was removed because the last element in last_arr is -1, which produces errors.
# Also, it is not clear why mdset is needed.
#                 for train in range(first_arr.size):
#                     first = first_arr[train]
#                     last = last_arr[train]
#                     mdset[first:last, ...] = np.repeat(bpix[None, ...], last - first, axis=0)
    except Exception as e:
        print(e)
        success = False
        reason = "Error"
        err = e

    if err is None and offset_not_found:
        err = "Offset was not found in the database!"

    return (hists_signal_low, hists_signal_high, low_edges, high_edges, pulse_edges, when, qm, err)

done = False
first_files = []
inp = []
left = total_sequences

hists_signal_low = 0
hists_signal_high = 0
low_edges, high_edges, pulse_edges = None, None, None

whens = []
qms = []
Errors = []
while not done:
    dones = []
    first = True
    for i in range(16):
        qm = "Q{}M{}".format(i // 4 + 1, i % 4 + 1)
        if qm in mapped_files and not mapped_files[qm].empty():
            fname_in = str(mapped_files[qm].get())
            dones.append(mapped_files[qm].empty())
        else:
            print("Skipping {}".format(qm))
            first_files.append((None, None))
            continue
        fout = os.path.abspath("{}/{}".format(out_folder,
                                              (os.path.split(fname_in)[-1]).replace("RAW", "CORR")))
        if first:
            first_files.append((fname_in, fout))
        inp.append((fname_in, fout, i, qm))
    first = False
    if len(inp) >= min(MAX_PAR, left):
        print("Running {} tasks parallel".format(len(inp)))
        p = partial(correct_module, total_sequences, sequences_qm, loc, dinstance, offset_image,
                    mask_noisy_asic, mask_cold_asic, noisy_pix_threshold, chunk_size_idim,
                    mem_cells, bias_voltage, cal_db_timeout, creation_time, cal_db_interface)
        r = view.map_sync(p, inp)
        # r = list(map(p, inp))
        inp = []
        left -= MAX_PAR

        for rr in r:
            if rr is not None:
                hl, hh, low_edges, high_edges, pulse_edges, when, qm, err = rr
                whens.append(when)
                qms.append(qm)
                Errors.append(err)
                if hl is not None:  # any one being None will also make the others None
                    hists_signal_low += hl.astype(np.float64)
                    hists_signal_high += hh.astype(np.float64)

    done = all(dones)

whens = [x for _, x in sorted(zip(qms, whens))]
qms = sorted(qms)
for i, qm in enumerate(qms):
    try:
        when = whens[i].isoformat()
    except:
        when = whens[i]
    if Errors[i] is not None:
        print("Offset for {} was injected on {}, Error: {}".format(qm, when, Errors[i]))
    else:
        print("Offset for {} was injected on {}".format(qm, when))
```
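One subtlety in the cell above: after the two `np.moveaxis` calls the offset constant is indexed by memory cell, so `offset[cellId, ...]` picks the per-cell dark level for every frame in the train via fancy indexing. A toy-shape sketch (shapes and values invented purely for illustration):

``` python
# Toy shapes, not detector data: 3 memory cells of 2x2 offsets, and a
# 4-frame train whose frames came from cells [0, 1, 2, 1].
offset_demo = np.arange(3 * 2 * 2, dtype=np.float32).reshape(3, 2, 2)
cellId_demo = np.array([0, 1, 2, 1])
frames = np.ones((4, 2, 2), dtype=np.float32)
frames -= offset_demo[cellId_demo, ...]  # per-frame dark subtraction
print(frames.shape)  # (4, 2, 2)
```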
%% Cell type:code id: tags:
``` python
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
%matplotlib inline

def do_3d_plot(data, edges, x_axis, y_axis):
    fig = plt.figure(figsize=(10, 10))
    ax = fig.gca(projection='3d')

    # Make data.
    X = edges[0][:-1]
    Y = edges[1][:-1]
    X, Y = np.meshgrid(X, Y)
    Z = data.T

    # Plot the surface.
    surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    ax.set_xlabel(x_axis)
    ax.set_ylabel(y_axis)
    ax.set_zlabel("Counts")
```
%% Cell type:code id: tags:
``` python
def do_2d_plot(data, edges, y_axis, x_axis):
    from matplotlib.colors import LogNorm
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    extent = [np.min(edges[1]), np.max(edges[1]), np.min(edges[0]), np.max(edges[0])]
    im = ax.imshow(data[::-1, :], extent=extent, aspect="auto",
                   norm=LogNorm(vmin=1, vmax=np.max(data)))
    ax.set_xlabel(x_axis)
    ax.set_ylabel(y_axis)
    cb = fig.colorbar(im)
    cb.set_label("Counts")
```
%% Cell type:markdown id: tags:
## Mean Intensity per Pulse ##
The following plots show the mean signal for each pulse in a detailed and expanded intensity region.
%% Cell type:code id: tags:
``` python
do_3d_plot(hists_signal_low, [low_edges, pulse_edges], "Signal (ADU)", "Pulse id")
do_2d_plot(hists_signal_low, [low_edges, pulse_edges], "Signal (ADU)", "Pulse id")
do_3d_plot(hists_signal_high, [high_edges, pulse_edges], "Signal (ADU)", "Pulse id")
do_2d_plot(hists_signal_high, [high_edges, pulse_edges], "Signal (ADU)", "Pulse id")
```
%% Cell type:code id: tags:
``` python
corrected = []
raw = []
mask = []
cell_fac = 1
first_idx = 400 * 10 + 40
last_idx = 400 * 10 + 56
pulse_ids = []
train_ids = []
for i, ff in enumerate(first_files[:16]):
    try:
        rf, cf = ff
        if rf is None:
            raise Exception("File not present")
        infile = h5py.File(rf, "r")
        raw.append(np.array(infile["/INSTRUMENT/{}/DET/{}CH0:xtdf/image/data".format(loc, i)][first_idx:last_idx, 0, ...]))
        infile.close()
        infile = h5py.File(cf, "r")
        corrected.append(np.array(infile["/INSTRUMENT/{}/DET/{}CH0:xtdf/image/data".format(loc, i)][first_idx:last_idx, ...]))
        mask.append(np.array(infile["/INSTRUMENT/{}/DET/{}CH0:xtdf/image/mask".format(loc, i)][first_idx:last_idx, ...]))
        pulse_ids.append(np.squeeze(infile["/INSTRUMENT/{}/DET/{}CH0:xtdf/image/pulseId".format(loc, i)][first_idx:last_idx, ...]))
        train_ids.append(np.squeeze(infile["/INSTRUMENT/{}/DET/{}CH0:xtdf/image/trainId".format(loc, i)][first_idx:last_idx, ...]))
        infile.close()
    except Exception as e:
        print(e)
        corrected.append(np.zeros((last_idx - first_idx, 512, 128)))
        mask.append(np.zeros((last_idx - first_idx, 512, 128)))
        raw.append(np.zeros((last_idx - first_idx, 512, 128)))
```
%% Cell type:code id: tags:
``` python
def combine_stack(d, sdim):
    combined = np.zeros((sdim, 1300, 1300), np.float32)
    combined[...] = 0
    dy = 0
    quad_pos = [
        (0, 145),
        (130, 140),
        (125, 15),
        (0, 15),
    ]
    px = 0.236
    py = 0.204
    with h5py.File(geo_file, "r") as gf:
        for i in range(16):
            mi = i
            mi = 3 - (i % 4)
            mp = gf["Q{}/M{}/Position".format(i // 4 + 1, mi % 4 + 1)][()]
            t1 = gf["Q{}/M{}/T01/Position".format(i // 4 + 1, i % 4 + 1)][()]
            t2 = gf["Q{}/M{}/T02/Position".format(i // 4 + 1, i % 4 + 1)][()]
            if i // 4 < 2:
                t1, t2 = t2, t1
            if i // 4 == 0 or i // 4 == 1:
                td = d[i][:, ::-1, :]
            else:
                td = d[i][:, :, ::-1]
            t1d = td[:, :, :256]
            t2d = td[:, :, 256:]
            x0t1 = int((t1[0] + mp[0]) / px)
            y0t1 = int((t1[1] + mp[1]) / py)
            x0t2 = int((t2[0] + mp[0]) / px)
            y0t2 = int((t2[1] + mp[1]) / py)
            x0t1 += int(quad_pos[i // 4][1] / px)
            x0t2 += int(quad_pos[i // 4][1] / px)
            y0t1 += int(quad_pos[i // 4][0] / py) + combined.shape[1] // 16
            y0t2 += int(quad_pos[i // 4][0] / py) + combined.shape[1] // 16
            combined[:, y0t1:y0t1+128, x0t1:x0t1+256] = t1d
            combined[:, y0t2:y0t2+128, x0t2:x0t2+256] = t2d
    return combined

combined = combine_stack(corrected, last_idx - first_idx)
```
%% Cell type:code id: tags:
``` python
combined = combine_stack(corrected, last_idx - first_idx)
combined_raw = combine_stack(raw, last_idx - first_idx)
combined_mask = combine_stack(mask, last_idx - first_idx)
```
%% Cell type:markdown id: tags:
### Mean RAW Preview ###
The per pixel mean of the first 128 images of the RAW data
%% Cell type:code id: tags:
``` python
%matplotlib inline
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
im = ax.imshow(np.mean(combined_raw[:, ...], axis=0),
               vmin=min(0.75 * np.median(combined_raw[combined_raw > 0]), -5),
               vmax=max(1.5 * np.median(combined_raw[combined_raw > 0]), 50), cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
### Single Shot Preview ###
A single shot image from cell 2 of the first train
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
dim = combined[2, ...]
im = ax.imshow(dim, vmin=-0, vmax=max(1.5 * np.median(dim[dim > 0]), 50),
               cmap="jet", interpolation="nearest")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
h = ax.hist(dim.flatten(), bins=100, range=(0, 100))
```
%% Cell type:markdown id: tags:
### Mean CORRECTED Preview ###
The per pixel mean of the first 128 images of the CORRECTED data
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
im = ax.imshow(np.mean(combined[:, ...], axis=0), vmin=0,
               vmax=max(1.5 * np.median(combined[combined > 0]), 10),
               cmap="jet", interpolation="nearest")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
### Max CORRECTED Preview ###
The per pixel maximum of the first 128 images of the CORRECTED data
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.max(combined[:,...], axis=0), vmin=0,
               vmax=max(100 * np.median(combined[combined > 0]), 20), cmap="jet", interpolation="nearest")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
combined[combined <= 0] = 0
h = ax.hist(combined.flatten(), bins=100, range=(-5, 100), log=True)
```
%% Cell type:markdown id: tags:
## Bad Pixels ##
The mask contains dedicated entries for all pixels and memory cells as well as all three gain stages. Each mask entry is encoded in 32 bits as:
%% Cell type:code id: tags:
``` python
from cal_tools.enums import BadPixels
from IPython.display import HTML, display, Markdown, Latex
import tabulate
table = []
for item in BadPixels:
table.append((item.name, "{:016b}".format(item.value)))
md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=["Bad pixel type", "Bit mask"])))
```
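A short sketch (not part of the notebook) of how such a 32-bit entry is decoded: each `BadPixels` member sets a single bit, so one mask value can carry several defects at once and is tested with bitwise AND, exactly as the NOISY_ADC query further below does:

``` python
# Combine two defects into one mask entry, then recover them bit by bit.
mask_value = BadPixels.NOISE_OUT_OF_THRESHOLD.value | BadPixels.NOISY_ADC.value
for item in BadPixels:
    if mask_value & item.value:
        print(item.name)
```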
%% Cell type:markdown id: tags:
### Full Train Bad Pixels ###
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.log2(np.max(combined_mask[:,...], axis=0)), vmin=0,
vmax=32, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:markdown id: tags:
### Full Train Bad Pixels - Only Dark Char. Related ###
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
im = ax.imshow(np.max((combined_mask.astype(np.uint32)[:,...] & BadPixels.NOISY_ADC.value) != 0, axis=0), vmin=0,
vmax=1, cmap="jet")
cb = fig.colorbar(im, ax=ax)
```
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
```