Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
P
pycalibration
Manage
Activity
Members
Labels
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Deploy
Model registry
Analyze
Contributor analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
calibration
pycalibration
Commits
1892b687
Commit
1892b687
authored
5 years ago
by
Karim Ahmed
Browse files
Options
Downloads
Patches
Plain Diff
add path_inset to balance sequences
parent
c603ba08
No related branches found
No related tags found
1 merge request
!229
add path_inset to balance sequences
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
+1
-1
1 addition, 1 deletion
...books/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
with
1 addition
and
1 deletion
notebooks/FastCCD/CorrectionNotebook_NewDAQ_FastCCD_NBC.ipynb
+
1
−
1
View file @
1892b687
...
@@ -53,7 +53,7 @@
...
@@ -53,7 +53,7 @@
"flipped_between = [\"2019-02-01\", \"2019-04-02\"] # detector was flipped during this timespan\n",
"flipped_between = [\"2019-02-01\", \"2019-04-02\"] # detector was flipped during this timespan\n",
"temp_limits = 5 # limits within which temperature is considered the same\n",
"temp_limits = 5 # limits within which temperature is considered the same\n",
"\n",
"\n",
"def balance_sequences(in_folder, run, sequences, sequences_per_node):\n",
"def balance_sequences(in_folder, run, sequences, sequences_per_node
, path_inset
):\n",
" import glob\n",
" import glob\n",
" import re\n",
" import re\n",
" import numpy as np\n",
" import numpy as np\n",
...
...
%% Cell type:markdown id: tags:
%% Cell type:markdown id: tags:
# FastCCD Data Correction ##
# FastCCD Data Correction ##
Authors: I. Klačková, S. Hauf, Version 1.0
Authors: I. Klačková, S. Hauf, Version 1.0
The following notebook provides correction of images acquired with the FastCCD.
The following notebook provides correction of images acquired with the FastCCD.
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# --- Notebook parameters -----------------------------------------------------
# Input/output locations
in_folder = "/gpfs/exfel/exp/SCS/201802/p002170/raw/"  # input folder, required
out_folder = '/gpfs/exfel/data/scratch/xcal/test/'  # output folder, required
path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5'  # path template in hdf5 file
path_inset = 'DA05'
run = 277  # run number

# HDF5 paths inside the raw files
h5path = '/INSTRUMENT/SCS_CDIDET_FCCD2M/DAQ/FCCD:daqOutput/data/image'  # path in HDF5 file
h5path_t = '/CONTROL/SCS_CDIDET_FCCD2M/CTRL/LSLAN/inputA/crdg/value'  # temperature path in HDF5 file
h5path_cntrl = '/RUN/SCS_CDIDET_FCCD2M/DET/FCCD'  # path to control data

# Execution environment
cluster_profile = "noDB"  # ipcluster profile to use
cpuCores = 16  # number of running cpu cores
operation_mode = "FF"  # FS stands for frame-store and FF for full-frame operation

# Split-event classification thresholds (in units of n sigma noise)
split_evt_primary_threshold = 7.  # primary threshold for split event classification
split_evt_secondary_threshold = 4.  # secondary threshold for split event classification
split_evt_mip_threshold = 1000.  # MIP threshold for event classification

# Calibration database
cal_db_interface = "tcp://max-exfl016:8015#8025"  # calibration DB interface to use
cal_db_timeout = 300000000  # timeout on caldb requests

# Correction options
sequences = [-1]  # sequences to correct, set to -1 for all, range allowed
chunk_size_idim = 1  # H5 chunking size of output data
overwrite = True  # overwrite existing files
do_pattern_classification = True  # classify split events
sequences_per_node = 1  # sequences to correct per node
limit_images = 0  # limit images per file
correct_offset_drift = False  # correct for offset drifts
use_dir_creation_date = True  # use dir creation date for calDB queries
time_offset_days = 0  # offset in days for calibration parameters
photon_energy_gain_map = 2.  # energy in keV
fix_temperature = 0.  # fix temperature to this value; 0 means use slow-control value
flipped_between = ["2019-02-01", "2019-04-02"]  # detector was flipped during this timespan
temp_limits = 5  # limits within which temperature is considered the same
def balance_sequences(in_folder, run, sequences, sequences_per_node, path_inset):
    """Split the sequence numbers of a run into per-node work packages.

    If ``sequences`` is the sentinel ``[-1]``, every sequence file found under
    ``{in_folder}/r{run:04d}`` for the given ``path_inset`` is distributed over
    at most 8 node jobs of roughly ``sequences_per_node`` sequences each.
    Otherwise ``sequences`` is returned unchanged.

    :param in_folder: root folder containing the run directories
    :param run: run number (builds the ``rXXXX`` directory name)
    :param sequences: explicit sequence numbers, or ``[-1]`` for "all"
    :param sequences_per_node: desired number of sequences per node job
    :param path_inset: detector/DAQ inset in the file names (e.g. ``"DA05"``)
    :return: list of lists of sequence numbers (one per node), or the
        original ``sequences`` if it was given explicitly
    """
    import glob
    import re

    import numpy as np

    if sequences[0] != -1:
        return sequences

    # '+' instead of '*' (a sequence number is never empty) and r'\.' so the
    # dot only matches a literal dot rather than any character.
    pattern = re.compile(r".*-S([0-9]+)\.h5")
    seq_nums = set()
    for sf in glob.glob("{}/r{:04d}/*{}-S*.h5".format(in_folder, run, path_inset)):
        match = pattern.findall(sf)
        if match:  # guard: unmatched names no longer raise IndexError
            seq_nums.add(int(match[0]))
    # With the [-1] sentinel this only discards the non-existent number -1.
    seq_nums -= set(sequences)
    nsplits = len(seq_nums) // sequences_per_node + 1
    while nsplits > 8:
        sequences_per_node += 1
        nsplits = len(seq_nums) // sequences_per_node + 1
        print("Changed to {} sequences per node to have a maximum of 8 "
              "concurrent jobs".format(sequences_per_node))
    # Sort for a deterministic, reproducible node assignment (set iteration
    # order is otherwise arbitrary).
    return [l.tolist() for l in np.array_split(sorted(seq_nums), nsplits)]
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
import
XFELDetAna.xfelprofiler
as
xprof
import
XFELDetAna.xfelprofiler
as
xprof
profiler
=
xprof
.
Profiler
()
profiler
=
xprof
.
Profiler
()
profiler
.
disable
()
profiler
.
disable
()
from
XFELDetAna.util
import
env
from
XFELDetAna.util
import
env
env
.
iprofile
=
cluster_profile
env
.
iprofile
=
cluster_profile
import
warnings
import
warnings
warnings
.
filterwarnings
(
'
ignore
'
)
warnings
.
filterwarnings
(
'
ignore
'
)
from
XFELDetAna
import
xfelpycaltools
as
xcal
from
XFELDetAna
import
xfelpycaltools
as
xcal
from
XFELDetAna
import
xfelpyanatools
as
xana
from
XFELDetAna
import
xfelpyanatools
as
xana
from
XFELDetAna.plotting.util
import
prettyPlotting
from
XFELDetAna.plotting.util
import
prettyPlotting
prettyPlotting
=
True
prettyPlotting
=
True
from
XFELDetAna.xfelreaders
import
ChunkReader
from
XFELDetAna.xfelreaders
import
ChunkReader
from
XFELDetAna.detectors.fastccd
import
readerh5
as
fastccdreaderh5
from
XFELDetAna.detectors.fastccd
import
readerh5
as
fastccdreaderh5
import
numpy
as
np
import
numpy
as
np
import
h5py
import
h5py
import
matplotlib.pyplot
as
plt
import
matplotlib.pyplot
as
plt
from
iminuit
import
Minuit
from
iminuit
import
Minuit
import
time
import
time
import
copy
import
copy
import
os
import
os
from
prettytable
import
PrettyTable
from
prettytable
import
PrettyTable
from
iCalibrationDB
import
ConstantMetaData
,
Constants
,
Conditions
,
Detectors
,
Versions
from
iCalibrationDB
import
ConstantMetaData
,
Constants
,
Conditions
,
Detectors
,
Versions
from
iCalibrationDB.detectors
import
DetectorTypes
from
iCalibrationDB.detectors
import
DetectorTypes
from
cal_tools.tools
import
get_dir_creation_date
from
cal_tools.tools
import
get_dir_creation_date
from
datetime
import
timedelta
from
datetime
import
timedelta
%
matplotlib
inline
%
matplotlib
inline
# Interpret the sentinel [-1] as "all sequences" for the listing logic below.
if sequences[0] == -1:
    sequences = None

# Constants consumed by the offset-drift correction
# (presumably slope/intercept of a linear model — verify against usage).
offset_correction_args = (0.2459991787617141, 243.21639920846485)
t_base = 247.82

# A '#'-separated port range in the DB address means: pick one port at random
# so concurrent jobs spread over several caldb server ports.
if "#" in cal_db_interface:
    prot, serv, ran = cal_db_interface.split(":")
    r1, r2 = ran.split("#")
    port = str(np.random.randint(int(r1), int(r2)))
    cal_db_interface = ":".join([prot, serv, port])
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# Detector geometry depends on the readout mode: frame-store (FS) uses half
# the rows of full-frame (FF).
if operation_mode == "FS":
    x = 960   # rows of the FastCCD to analyze in FS mode
    y = 960   # columns of the FastCCD to analyze in FS mode
    print('\nYou are analyzing data in FS mode.')
else:
    x = 1934  # rows of the FastCCD to analyze in FF mode
    y = 960   # columns of the FastCCD to analyze in FF mode
    print('\nYou are analyzing data in FF mode.')

# Build the input file path template for this run.
ped_dir = "{}/r{:04d}".format(in_folder, run)
fp_name = path_template.format(run, path_inset)
fp_path = '{}/{}'.format(ped_dir, fp_name)

print("Reading data from: {}\n".format(fp_path))
print("Run is: {}".format(run))
print("HDF5 path: {}".format(h5path))
print("Data is output to: {}".format(out_folder))

import datetime

# Optionally derive the calibration query time from the run directory's
# creation date, shifted by the configured day offset.
creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run) + timedelta(days=time_offset_days)
if creation_time:
    print("Using {} as creation time".format(creation_time.isoformat()))
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# --- Analysis geometry and runtime knobs -------------------------------------
sensorSize = [x, y]
chunkSize = 100  # number of images to read per chunk
blockSize = [sensorSize[0] // 2, sensorSize[1] // 4]  # sensor area analysed per block
xcal.defaultBlockSize = blockSize
memoryCells = 1  # FastCCD has 1 memory cell
# Specifies total number of images to proceed
commonModeBlockSize = blockSize
commonModeAxisR = 'row'  # axis along which common mode will be calculated
run_parallel = True
profile = False
temperature_k = 291

# Read detector conditions from the first available sequence file.
filename = fp_path.format(sequences[0] if sequences else 0)
with h5py.File(filename, 'r') as f:
    bias_voltage = int(f['{}/biasclock/bias/value'.format(h5path_cntrl)][0])
    det_gain = int(f['{}/exposure/gain/value'.format(h5path_cntrl)][0])
    integration_time = int(f['{}/acquisitionTime/value'.format(h5path_cntrl)][0])
    print("Bias voltage is {} V".format(bias_voltage))
    print("Detector gain is set to x{}".format(det_gain))
    print("Detector integration time is set to {}".format(integration_time))

    # Mean slow-control temperature, unless overridden by fix_temperature.
    temperature = np.mean(f[h5path_t])
    temperature_k = temperature + 273.15
    if fix_temperature != 0.:
        temperature_k = fix_temperature
        print("Using fixed temperature")
    print("Mean temperature was {:0.2f} °C / {:0.2f} K at beginning of run".format(temperature, temperature_k))

# Prepare the output folder, refusing to clobber it unless overwrite is set.
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
elif not overwrite:
    raise AttributeError("Output path exists! Exiting")
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# Collect the .h5 sequence files of this run that match the requested
# sequence numbers, recording which numbers were actually found.
dirlist = sorted(os.listdir(ped_dir))
file_list = []
total_sequences = 0
fsequences = []
for entry in dirlist:
    # only h5 file
    abs_entry = "{}/{}".format(ped_dir, entry)
    if not (os.path.isfile(abs_entry) and os.path.splitext(abs_entry)[1] == ".h5"):
        continue
    if sequences is None:
        # "All sequences": probe every plausible sequence index.
        for seq in range(len(dirlist)):
            if path_template.format(run, path_inset).format(seq) in abs_entry:
                file_list.append(abs_entry)
                total_sequences += 1
                fsequences.append(seq)
    else:
        for seq in sequences:
            if path_template.format(run, path_inset).format(seq) in abs_entry:
                file_list.append(os.path.abspath(abs_entry))
                total_sequences += 1
                fsequences.append(seq)
sequences = fsequences
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
import copy
from IPython.display import HTML, display, Markdown, Latex
import tabulate

# Summarize the files that will be processed as a LaTeX table.
print("Processing a total of {} sequence files".format(total_sequences))
table = []
for k, f in enumerate(file_list):
    table.append((k, f))
if len(table):
    md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=["#", "file"])))
```
```
%% Cell type:markdown id: tags:
%% Cell type:markdown id: tags:
As a first step, dark maps have to be loaded.
As a first step, dark maps have to be loaded.
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
offsetMap = None
badPixelMap = None
noiseMap = None

# Retrieve dark constants (offset, noise, bad pixels) from the calibration DB
# for each of the three gain settings; stack them along the last axis.
for i, g in enumerate([8, 2, 1]):
    ## offset
    metadata = ConstantMetaData()
    offset = Constants.CCD(DetectorTypes.fastCCD).Offset()
    metadata.calibration_constant = offset
    # set the operating condition
    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,
                                    integration_time=integration_time,
                                    gain_setting=g,
                                    temperature=temperature_k,
                                    pixels_x=1934,
                                    pixels_y=960)
    # Widen the temperature matching window to temp_limits.
    for parm in condition.parameters:
        if parm.name == "Sensor Temperature":
            parm.lower_deviation = temp_limits
            parm.upper_deviation = temp_limits
    device = Detectors.fastCCD1
    metadata.detector_condition = condition
    # specify the version for this constant
    if creation_time is None:
        metadata.calibration_constant_version = Versions.Now(device=device)
        metadata.retrieve(cal_db_interface)
    else:
        metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                                  start=creation_time)
        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)
    if offsetMap is None:
        offsetMap = np.zeros(list(offset.data.shape) + [3], np.float32)
    offsetMap[..., i] = offset.data

    offset_temperature = None
    for parm in condition.parameters:
        if parm.name == "Sensor Temperature":
            offset_temperature = parm.value
    print("Temperature of detector when dark images (gain {}) for offset calculation".format(g) +
          "were taken at: {:0.2f} K @ {}".format(offset_temperature,
                                                 metadata.calibration_constant_version.begin_at))

    ## noise
    metadata = ConstantMetaData()
    noise = Constants.CCD(DetectorTypes.fastCCD).Noise()
    metadata.calibration_constant = noise
    # set the operating condition
    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,
                                    integration_time=integration_time,
                                    gain_setting=g,
                                    temperature=temperature_k,
                                    pixels_x=1934,
                                    pixels_y=960)
    for parm in condition.parameters:
        if parm.name == "Sensor Temperature":
            parm.lower_deviation = temp_limits
            parm.upper_deviation = temp_limits
    device = Detectors.fastCCD1
    metadata.detector_condition = condition
    # specify the version for this constant
    if creation_time is None:
        metadata.calibration_constant_version = Versions.Now(device=device)
        metadata.retrieve(cal_db_interface)
    else:
        metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                                  start=creation_time)
        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)
    if noiseMap is None:
        noiseMap = np.zeros(list(noise.data.shape) + [3], np.float32)
    noiseMap[..., i] = noise.data

    ## bad pixels
    metadata = ConstantMetaData()
    bpix = Constants.CCD(DetectorTypes.fastCCD).BadPixelsDark()
    metadata.calibration_constant = bpix
    # set the operating condition
    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,
                                    integration_time=integration_time,
                                    gain_setting=g,
                                    temperature=temperature_k,
                                    pixels_x=1934,
                                    pixels_y=960)
    for parm in condition.parameters:
        if parm.name == "Sensor Temperature":
            parm.lower_deviation = temp_limits
            parm.upper_deviation = temp_limits
    device = Detectors.fastCCD1
    metadata.detector_condition = condition
    # specify the version for this constant
    if creation_time is None:
        metadata.calibration_constant_version = Versions.Now(device=device)
        metadata.retrieve(cal_db_interface)
    else:
        metadata.calibration_constant_version = Versions.Timespan(device=device,
                                                                  start=creation_time)
        metadata.retrieve(cal_db_interface, when=creation_time.isoformat(), timeout=3000000)
    if badPixelMap is None:
        badPixelMap = np.zeros(list(bpix.data.shape) + [3], np.uint32)
    badPixelMap[..., i] = bpix.data
```
```
%% Cell type:markdown id: tags:
%% Cell type:markdown id: tags:
Loading cti and relative gain values
Loading cti and relative gain values
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
## relative gain
# Retrieve the illuminated relative-gain constant from the calibration DB.
metadata = ConstantMetaData()
relgain = Constants.CCD(DetectorTypes.fastCCD).RelativeGain()
metadata.calibration_constant = relgain

# set the operating condition
condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,
                                       integration_time=integration_time,
                                       gain_setting=0,
                                       temperature=temperature_k,
                                       pixels_x=1934,
                                       pixels_y=960,
                                       photon_energy=photon_energy_gain_map)
device = Detectors.fastCCD1
metadata.detector_condition = condition

# specify the a version for this constant
metadata.calibration_constant_version = Versions.Now(device=device)
metadata.retrieve(cal_db_interface)

# Flip along the first axis (row order of the stored constant is reversed
# with respect to the data — presumably; verify against detector docs).
relGain = relgain.data[::-1, ...]
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# Derive a per-row CTI (charge-transfer-inefficiency) factor for each sensor
# half from the row-to-row drift of the mean relative gain, then smooth
# outlier gain columns and fold the CTI back into the gain map.

# --- CTI for the lower half: reference is row 0, walking upwards ------------
relGainCA = copy.copy(relGain)
relGainC = relGainCA[:relGainCA.shape[0] // 2, ...]
ctiA = np.ones(relGainCA.shape[:2])
cti = np.ones(relGainC.shape[:2])
i = 0
idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i, :, 0] > 1.1)
mn1 = np.nanmean(relGainC[i, ~idx, 0])
for i in range(1, relGainC.shape[0]):
    idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i, :, 0] > 1.1)
    mn2 = np.nanmean(relGainC[i, ~idx, 0])
    cti[i, :] = mn2 / mn1
ctiA[:relGainCA.shape[0] // 2, ...] = cti

# --- CTI for the upper half: reference is the last row, walking downwards ---
relGainC = relGainCA[relGainCA.shape[0] // 2:, ...]
cti = np.ones(relGainC.shape[:2])
i = -1
idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i, :, 0] > 1.1)
mn1 = np.nanmean(relGainC[i, ~idx, 0])
for i in range(relGainC.shape[0] - 1, 1, -1):
    idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i, :, 0] > 1.1)
    mn2 = np.nanmean(relGainC[i, ~idx, 0])
    cti[i, :] = mn2 / mn1
ctiA[relGainCA.shape[0] // 2:, ...] = cti

# --- Replace outlier gain entries by the column mean, per half --------------
relGainCA = copy.copy(relGain)
relGainC = relGainCA[:relGainCA.shape[0] // 2, ...]
for i in range(relGainC.shape[1]):
    idx = (relGainC[:, i, 0] < 0.95) | (relGainC[:, i, 0] > 1.05)
    relGainC[idx, i, 0] = np.nanmean(relGainC[~idx, i, 0])
    relGainC[idx, i, 1] = np.nanmean(relGainC[~idx, i, 1])
    relGainC[idx, i, 2] = np.nanmean(relGainC[~idx, i, 2])
relGainCA[:relGainCA.shape[0] // 2, ...] = relGainC

relGainC = relGainCA[relGainCA.shape[0] // 2:, ...]
for i in range(relGainC.shape[1]):
    idx = (relGainC[:, i, 0] < 0.95) | (relGainC[:, i, 0] > 1.05)
    relGainC[idx, i, 0] = np.nanmean(relGainC[~idx, i, 0])
    relGainC[idx, i, 1] = np.nanmean(relGainC[~idx, i, 1])
    relGainC[idx, i, 2] = np.nanmean(relGainC[~idx, i, 2])
relGainCA[relGainCA.shape[0] // 2:, ...] = relGainC

# Combine the smoothed gain with the CTI factors.
relGainC = relGainCA * ctiA[..., None]
relGain = relGainC
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
import dateutil.parser

# The flip window is configured as ISO date strings; convert to datetimes.
flipped_between = [dateutil.parser.parse(d) for d in flipped_between]

# The relative-gain constant has to be mirrored unless both the run and the
# gain constant were taken while the detector was mounted flipped.
flip_rgain = flipped_between[0] <= creation_time <= flipped_between[1]
# begin_at is timezone-aware; strip tzinfo to compare with the naive bounds.
rgain_begin = metadata.calibration_constant_version.begin_at.replace(tzinfo=None)
flip_rgain &= flipped_between[0] <= rgain_begin <= flipped_between[1]
print("Accounting for flipped detector: {}".format(flip_rgain))
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
#************************Calculators************************#
#************************Calculators************************#
#************************Calculators************************#
# Row common-mode correction over the full sensor.
cmCorrection = xcal.CommonModeCorrection(
    [x, y],
    commonModeBlockSize,
    commonModeAxisR,
    nCells=memoryCells,
    noiseMap=noiseMap,
    runParallel=True,
    stats=True,
)

# One split-event pattern classifier per hemisphere; each works on half the
# rows and uses the matching half of the noise map.
patternClassifierLH = xcal.PatternClassifier(
    [x // 2, y],
    noiseMap[:x // 2, :],
    split_evt_primary_threshold,
    split_evt_secondary_threshold,
    split_evt_mip_threshold,
    tagFirstSingles=0,
    nCells=memoryCells,
    cores=cpuCores,
    allowElongated=False,
    blockSize=[x // 2, y],
    runParallel=True,
)

patternClassifierUH = xcal.PatternClassifier(
    [x // 2, y],
    noiseMap[x // 2:, :],
    split_evt_primary_threshold,
    split_evt_secondary_threshold,
    split_evt_mip_threshold,
    tagFirstSingles=0,
    nCells=memoryCells,
    cores=cpuCores,
    allowElongated=False,
    blockSize=[x // 2, y],
    runParallel=True,
)
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
#*****************Histogram Calculators******************#
#*****************Histogram Calculators******************#
#*****************Histogram Calculators******************#
# Three identical full-sensor histograms (500 bins over -50..1000 ADU):
# after offset correction, after split-event correction, and for single
# pixel hits only.
histCalOffsetCor = xcal.HistogramCalculator(
    [x, y],
    bins=500,
    range=[-50, 1000],
    nCells=memoryCells,
    cores=cpuCores,
    blockSize=blockSize,
)
histCalPcorr = xcal.HistogramCalculator(
    [x, y],
    bins=500,
    range=[-50, 1000],
    nCells=memoryCells,
    cores=cpuCores,
    blockSize=blockSize,
)
histCalPcorrS = xcal.HistogramCalculator(
    [x, y],
    bins=500,
    range=[-50, 1000],
    nCells=memoryCells,
    cores=cpuCores,
    blockSize=blockSize,
)
```
```
%% Cell type:markdown id: tags:
%% Cell type:markdown id: tags:
Applying corrections
Applying corrections
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# Process images in chunks of 500 and switch both hemisphere classifiers
# into debug mode.
for _classifier in (patternClassifierLH, patternClassifierUH):
    _classifier._imagesPerChunk = 500
    _classifier.debug()
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# Enable debug output for the histogram calculators used below.
for _hist in (histCalOffsetCor, histCalPcorr):
    _hist.debug()
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
def copy_and_sanitize_non_cal_data(infile, outfile, h5base):
    """Copy the HDF5 tree from *infile* to *outfile*, skipping the raw
    ``<h5base>/pixels`` dataset (it is rewritten with corrected data later).

    Groups are recreated; all other datasets are copied verbatim into their
    parent group.
    """
    if h5base.startswith("/"):
        h5base = h5base[1:]
    skip = {h5base + "/pixels"}

    def visitor(path, item):
        # Called for every object in the file; ignore the blacklisted paths.
        if path in skip:
            return
        if isinstance(item, h5py.Group):
            outfile.create_group(path)
        elif isinstance(item, h5py.Dataset):
            parent = "/".join(str(path).split("/")[:-1])
            infile.copy(path, outfile[parent])

    infile.visititems(visitor)
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# Apply offset / relative-gain and (optionally) common-mode + split-event
# corrections to every sequence file, writing one CORR file per RAW file.
mean_im = None        # mean image of the first sequence after offset corr.
single_im = None      # first frame of the first sequence
mean_im_cc = None     # same, after common-mode + split-event treatment
single_im_cc = None
drift_lh = []         # per-frame residual offset, lower hemisphere
drift_uh = []         # per-frame residual offset, upper hemisphere

offsetMap = np.squeeze(offsetMap)
noiseMap = np.squeeze(noiseMap)
badPixelMap = np.squeeze(badPixelMap)
relGain = np.squeeze(relGain)

for k, f in enumerate(file_list):
    with h5py.File(f, 'r', driver='core') as infile:
        out_fileb = "{}/{}".format(out_folder, f.split("/")[-1])
        out_file = out_fileb.replace("RAW", "CORR")
        #out_filed = out_fileb.replace("RAW", "CORR-SC")
        data = None
        noise = None
        try:
            with h5py.File(out_file, "w") as ofile:
                copy_and_sanitize_non_cal_data(infile, ofile, h5path)
                data = infile[h5path + "/pixels"][()]
                # Drop frames that are entirely zero.
                nzidx = np.count_nonzero(data, axis=(1, 2))
                data = data[nzidx != 0, ...]
                if limit_images > 0:
                    data = data[:limit_images, ...]
                oshape = data.shape
                # Work with the frame axis last; the HDF5 output keeps it first.
                data = np.moveaxis(data, 0, 2)

                ddset = ofile.create_dataset(h5path + "/pixels",
                                             oshape,
                                             chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                             dtype=np.float32)
                ddsetm = ofile.create_dataset(h5path + "/mask",
                                              oshape,
                                              chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                              dtype=np.uint32,
                                              compression="gzip")
                ddsetg = ofile.create_dataset(h5path + "/gain",
                                              oshape,
                                              chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                              dtype=np.uint8,
                                              compression="gzip")

                # Bits above the 14-bit ADC value encode the gain stage.
                gain = np.right_shift(data, 14)
                gain[gain != 0] -= 1
                fstride = 1
                if not flip_rgain:  # rgain was taken during flipped orientation
                    fstride = -1
                data = np.bitwise_and(data, 0b0011111111111111).astype(np.float32)

                # Broadcast constants over all frames, then select the value
                # matching each pixel's gain stage.
                omap = np.repeat(offsetMap[..., None, :], data.shape[2], axis=2)
                rmap = np.repeat(relGain[:, ::fstride, None, :], data.shape[2], axis=2)
                nmap = np.repeat(noiseMap[..., None, :], data.shape[2], axis=2)
                bmap = np.repeat(badPixelMap[..., None, :], data.shape[2], axis=2)
                offset = np.choose(gain, (omap[..., 0], omap[..., 1], omap[..., 2]))
                rg = np.choose(gain, (rmap[..., 0], rmap[..., 1], rmap[..., 2]))
                noise = np.choose(gain, (nmap[..., 0], nmap[..., 1], nmap[..., 2]))
                bpix = np.choose(gain, (bmap[..., 0], bmap[..., 1], bmap[..., 2]))

                data -= offset
                data *= rg

                if correct_offset_drift:
                    # Track the residual offset in a patch next to the centre
                    # of each hemisphere and subtract it per frame.
                    lhd = np.mean(data[x//2-10:x//2, y//2-5:y//2+5, :], axis=(0, 1))
                    data[:x//2, :, :] -= lhd
                    drift_lh.append(lhd)

                    uhd = np.mean(data[x//2:x//2+10, y//2-5:y//2+5, :], axis=(0, 1))
                    data[x//2:, :, :] -= uhd
                    # BUG FIX: this previously appended lhd, so the recorded
                    # upper-hemisphere drift duplicated the lower one.
                    drift_uh.append(uhd)

                histCalOffsetCor.fill(data)
                ddset[...] = np.moveaxis(data, 2, 0)
                ddsetm[...] = np.moveaxis(bpix, 2, 0)
                ddsetg[...] = np.moveaxis(gain, 2, 0).astype(np.uint8)

                if mean_im is None:
                    mean_im = np.nanmean(data, axis=2)
                    single_im = data[..., 0]

                if do_pattern_classification:
                    ddsetcm = ofile.create_dataset(h5path + "/pixels_cm",
                                                   oshape,
                                                   chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                                   dtype=np.float32)
                    ddsetc = ofile.create_dataset(h5path + "/pixels_classified",
                                                  oshape,
                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                                  dtype=np.float32,
                                                  compression="gzip")
                    ddsetp = ofile.create_dataset(h5path + "/patterns",
                                                  oshape,
                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),
                                                  dtype=np.int32,
                                                  compression="gzip")

                    patternClassifierLH._noisemap = noise[:x//2, :, :]
                    patternClassifierUH._noisemap = noise[x//2:, :, :]

                    data = cmCorrection.correct(data)  # correct for the row common mode
                    ddsetcm[...] = np.moveaxis(data, 2, 0)

                    dataLH = data[:x//2, :, :]
                    dataUH = data[x//2:, :, :]
                    dataLH, patternsLH = patternClassifierLH.classify(dataLH)
                    dataUH, patternsUH = patternClassifierUH.classify(dataUH)
                    data[:x//2, :, :] = dataLH
                    data[x//2:, :, :] = dataUH

                    patterns = np.zeros(data.shape, patternsLH.dtype)
                    patterns[:x//2, :, :] = patternsLH
                    patterns[x//2:, :, :] = patternsUH

                    # Suppress sub-threshold charge remaining after splitting.
                    data[data < split_evt_primary_threshold * noise] = 0
                    ddsetc[...] = np.moveaxis(data, 2, 0)
                    ddsetp[...] = np.moveaxis(patterns, 2, 0)

                    histCalPcorr.fill(data)
                    # Pattern code 100 presumably marks single-pixel hits --
                    # TODO confirm against the xcal pattern-code convention.
                    data[patterns != 100] = np.nan
                    histCalPcorrS.fill(data)

                    if mean_im_cc is None:
                        mean_im_cc = np.nanmean(data, axis=2)
                        single_im_cc = data[..., 0]
        except Exception as e:
            # Best-effort per file: report and continue with the next sequence.
            print("Couldn't calibrate data in {}: {}".format(f, e))
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
if correct_offset_drift:
    # Concatenate the per-sequence drift traces into one frame-ordered series.
    lhds = np.concatenate(drift_lh)
    uhds = np.concatenate(drift_uh)
    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(111)
    ax.plot(lhds, label="Lower hem.")
    ax.plot(uhds, label="Upper hem.")
    ax.set_xlabel("Frame #")
    # BUG FIX: the y-axis label was previously set with set_xlabel, which
    # overwrote the x-axis label and left the y axis unlabelled.
    ax.set_ylabel("Offset drift (ADU)")
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
if do_pattern_classification:
    print("******************LOWER HEMISPHERE******************\n")

    patternStatsLH = patternClassifierLH.getPatternStats()
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(4, 4, 1)
    sfields = ["singles", "first singles", "clusters"]
    mfields = ["doubles", "triples", "quads"]
    relativeOccurances = []
    labels = []
    # Scalar counts first, then one entry per orientation of each
    # multi-pixel pattern class.
    for i, f in enumerate(sfields):
        relativeOccurances.append(patternStatsLH[f])
        labels.append(f)
    for i, f in enumerate(mfields):
        for k in range(len(patternStatsLH[f])):
            relativeOccurances.append(patternStatsLH[f][k])
            labels.append(f + "(" + str(k) + ")")
    # BUG FIX: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; use the builtin directly.
    relativeOccurances = np.array(relativeOccurances, float)
    relativeOccurances /= np.sum(relativeOccurances)
    pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)
    ax.set_title("Pattern occurrence")
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    a = ax.axis('equal')

    smaps = ["singlemap", "firstsinglemap", "clustermap"]
    for i, m in enumerate(smaps):
        ax = fig.add_subplot(4, 4, 2 + i)
        pmap = ax.imshow(patternStatsLH[m], interpolation="nearest",
                         vmax=2 * np.nanmedian(patternStatsLH[m]))
        ax.set_title(m)
        cb = fig.colorbar(pmap)

    mmaps = ["doublemap", "triplemap", "quadmap"]
    k = 0
    for i, m in enumerate(mmaps):
        for j in range(4):
            ax = fig.add_subplot(4, 4, 2 + len(smaps) + k)
            pmap = ax.imshow(patternStatsLH[m][j], interpolation="nearest",
                             vmax=2 * np.median(patternStatsLH[m][j]))
            ax.set_title(m + "(" + str(j) + ")")
            cb = fig.colorbar(pmap)
            k += 1
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
if do_pattern_classification:
    patternStatsUH = patternClassifierUH.getPatternStats()
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(4, 4, 1)
    sfields = ["singles", "first singles", "clusters"]
    mfields = ["doubles", "triples", "quads"]
    relativeOccurances = []
    labels = []
    # Scalar counts first, then one entry per orientation of each
    # multi-pixel pattern class.
    for i, f in enumerate(sfields):
        relativeOccurances.append(patternStatsUH[f])
        labels.append(f)
    for i, f in enumerate(mfields):
        for k in range(len(patternStatsUH[f])):
            relativeOccurances.append(patternStatsUH[f][k])
            labels.append(f + "(" + str(k) + ")")
    # BUG FIX: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; use the builtin directly.
    relativeOccurances = np.array(relativeOccurances, float)
    relativeOccurances /= np.sum(relativeOccurances)
    pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)
    ax.set_title("Pattern occurrence")
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    a = ax.axis('equal')

    smaps = ["singlemap", "firstsinglemap", "clustermap"]
    for i, m in enumerate(smaps):
        ax = fig.add_subplot(4, 4, 2 + i)
        pmap = ax.imshow(patternStatsUH[m], interpolation="nearest",
                         vmax=2 * np.nanmedian(patternStatsUH[m]))
        ax.set_title(m)
        cb = fig.colorbar(pmap)

    mmaps = ["doublemap", "triplemap", "quadmap"]
    k = 0
    for i, m in enumerate(mmaps):
        for j in range(4):
            ax = fig.add_subplot(4, 4, 2 + len(smaps) + k)
            # NOTE(review): the LH cell scales vmax by 2*median while this one
            # uses the bare median -- possibly unintended; confirm before
            # harmonising.
            pmap = ax.imshow(patternStatsUH[m][j], interpolation="nearest",
                             vmax=np.median(patternStatsUH[m][j]))
            ax.set_title(m + "(" + str(j) + ")")
            cb = fig.colorbar(pmap)
            k += 1

    # NOTE(review): this banner prints after the figures, whereas the LH cell
    # prints its banner first -- likely relies on notebook output ordering.
    print("******************UPPER HEMISPHERE******************\n")
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
if do_pattern_classification:
    # Summary table: scalar pattern counts per hemisphere.
    t0 = PrettyTable()
    t0.title = "Total number of Counts after all corrections"
    t0.field_names = ["Hemisphere", "Singles", "First-singles", "Clusters"]
    for hemi, stats in (("LH", patternStatsLH), ("UH", patternStatsUH)):
        t0.add_row([hemi,
                    stats['singles'],
                    stats['first singles'],
                    stats['clusters']])
    print(t0)

    # Per-orientation table: doubles/triples/quads for both hemispheres.
    t1 = PrettyTable()
    t1.field_names = ["Index", "D-LH", "D-UH", "T-LH", "T-UH", "Q-LH", "Q-UH"]
    for idx in range(4):
        t1.add_row([idx,
                    patternStatsLH['doubles'][idx],
                    patternStatsUH['doubles'][idx],
                    patternStatsLH['triples'][idx],
                    patternStatsUH['triples'][idx],
                    patternStatsLH['quads'][idx],
                    patternStatsUH['quads'][idx]])
    print(t1)
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
if do_pattern_classification:
    def _sum4(counts):
        # Total over the four orientations of a multi-pixel pattern class.
        return counts[0] + counts[1] + counts[2] + counts[3]

    doublesLH = _sum4(patternStatsLH['doubles'])
    triplesLH = _sum4(patternStatsLH['triples'])
    quadsLH = _sum4(patternStatsLH['quads'])
    allsinglesLH = patternStatsLH['singles'] + patternStatsLH['first singles']
    eventsLH = allsinglesLH + doublesLH + triplesLH + quadsLH

    doublesUH = _sum4(patternStatsUH['doubles'])
    triplesUH = _sum4(patternStatsUH['triples'])
    quadsUH = _sum4(patternStatsUH['quads'])
    allsinglesUH = patternStatsUH['singles'] + patternStatsUH['first singles']
    eventsUH = allsinglesUH + doublesUH + triplesUH + quadsUH

    # Fraction of all events falling into each pattern class, per hemisphere.
    reloccurLH = np.array([allsinglesLH / eventsLH,
                           doublesLH / eventsLH,
                           triplesLH / eventsLH,
                           quadsLH / eventsLH])
    reloccurUH = np.array([allsinglesUH / eventsUH,
                           doublesUH / eventsUH,
                           triplesUH / eventsUH,
                           quadsUH / eventsUH])
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
if do_pattern_classification:
    # Side-by-side pattern-occurrence pie charts for the two hemispheres.
    fig = plt.figure(figsize=(10, 5))
    labels = ['singles', 'doubles', 'triples', 'quads']
    for pos, (occur, hemi) in enumerate([(reloccurLH, "LH"),
                                         (reloccurUH, "UH")], start=1):
        ax = fig.add_subplot(1, 2, pos)
        pie = ax.pie(occur, labels=labels, autopct='%1.1f%%', shadow=True)
        ax.set_title("Pattern occurrence " + hemi)
        # Set aspect ratio to be equal so that pie is drawn as a circle.
        a = ax.axis('equal')
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
# Plot the offset-corrected energy spectrum with Poisson (sqrt-N) error bars.
ho, eo, co, so = histCalOffsetCor.get()
series = {
    'x': co,
    'y': ho,
    'y_err': np.sqrt(ho[:]),
    'drawstyle': 'steps-mid',
    'errorstyle': 'bars',
    'errorcoarsing': 2,
    'label': 'Offset corr.',
}
fig = xana.simplePlot([series], aspect=1, x_label='Energy(ADU)',
                      y_label='Number of occurrences', figsize='2col',
                      y_log=True, x_range=(-50, 500),
                      legend='top-center-frame-2col')
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
if do_pattern_classification:
    # Compare the split-event-corrected spectrum with the single-hit-only one.
    h1, e1L, c1L, s1L = histCalPcorr.get()
    h1s, e1Ls, c1Ls, s1Ls = histCalPcorrS.get()
    series = [
        {
            'x': c1L,
            'y': h1,
            'y_err': np.sqrt(h1[:]),
            'drawstyle': 'steps-mid',
            'label': 'Split event corrected',
        },
        {
            'x': c1Ls,
            'y': h1s,
            'y_err': np.sqrt(h1s[:]),
            'drawstyle': 'steps-mid',
            'label': 'Single pixel hits',
        },
    ]
    fig = xana.simplePlot(series, aspect=1, x_label='Energy(ADU)',
                          y_label='Number of occurrences', figsize='2col',
                          y_log=True, x_range=(0, 200), x_log=False,
                          legend='top-center-frame-2col')
```
```
%% Cell type:markdown id: tags:
%% Cell type:markdown id: tags:
## Mean Image of first Sequence ##
## Mean Image of first Sequence ##
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
fig
=
xana
.
heatmapPlot
(
mean_im
,
fig
=
xana
.
heatmapPlot
(
mean_im
,
x_label
=
'
Columns
'
,
y_label
=
'
Rows
'
,
x_label
=
'
Columns
'
,
y_label
=
'
Rows
'
,
lut_label
=
'
Signal (ADU)
'
,
lut_label
=
'
Signal (ADU)
'
,
x_range
=
(
0
,
y
),
x_range
=
(
0
,
y
),
y_range
=
(
0
,
x
),
vmin
=-
50
,
vmax
=
500
)
y_range
=
(
0
,
x
),
vmin
=-
50
,
vmax
=
500
)
if
do_pattern_classification
:
if
do_pattern_classification
:
fig
=
xana
.
heatmapPlot
(
mean_im_cc
,
fig
=
xana
.
heatmapPlot
(
mean_im_cc
,
x_label
=
'
Columns
'
,
y_label
=
'
Rows
'
,
x_label
=
'
Columns
'
,
y_label
=
'
Rows
'
,
lut_label
=
'
Signal (ADU)
'
,
lut_label
=
'
Signal (ADU)
'
,
x_range
=
(
0
,
y
),
x_range
=
(
0
,
y
),
y_range
=
(
0
,
x
),
vmin
=-
50
,
vmax
=
500
)
y_range
=
(
0
,
x
),
vmin
=-
50
,
vmax
=
500
)
```
```
%% Cell type:markdown id: tags:
%% Cell type:markdown id: tags:
## Single Shot of first Sequnce ##
## Single Shot of first Sequnce ##
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
fig
=
xana
.
heatmapPlot
(
single_im
,
fig
=
xana
.
heatmapPlot
(
single_im
,
x_label
=
'
Columns
'
,
y_label
=
'
Rows
'
,
x_label
=
'
Columns
'
,
y_label
=
'
Rows
'
,
lut_label
=
'
Signal (ADU)
'
,
lut_label
=
'
Signal (ADU)
'
,
x_range
=
(
0
,
y
),
x_range
=
(
0
,
y
),
y_range
=
(
0
,
x
),
vmin
=-
50
,
vmax
=
500
)
y_range
=
(
0
,
x
),
vmin
=-
50
,
vmax
=
500
)
if
do_pattern_classification
:
if
do_pattern_classification
:
fig
=
xana
.
heatmapPlot
(
single_im_cc
,
fig
=
xana
.
heatmapPlot
(
single_im_cc
,
x_label
=
'
Columns
'
,
y_label
=
'
Rows
'
,
x_label
=
'
Columns
'
,
y_label
=
'
Rows
'
,
lut_label
=
'
Signal (ADU)
'
,
lut_label
=
'
Signal (ADU)
'
,
x_range
=
(
0
,
y
),
x_range
=
(
0
,
y
),
y_range
=
(
0
,
x
),
vmin
=-
50
,
vmax
=
500
)
y_range
=
(
0
,
x
),
vmin
=-
50
,
vmax
=
500
)
```
```
%% Cell type:code id: tags:
%% Cell type:code id: tags:
```
python
```
python
```
```
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment