pycalibration

Commit e3cacdf8, authored 5 years ago by Karim Ahmed

revert unrelated changes

parent 5c496d77
1 merge request: !228 Feat/batch prioritization darks
Showing 1 changed file: cal_tools/cal_tools/agipdlib.py (+34, −39)
@@ -1106,11 +1106,11 @@ class AgipdCorrections:
     def get_valid_image_idx(self):
         """ Return the indices of valid data
         """
-        idx_base = self.idx_base
+        agipd_base = self.idx_base
         if self.index_v == 2:
-            count = np.squeeze(self.infile[idx_base + "image/count"])
-            first = np.squeeze(self.infile[idx_base + "image/first"])
+            count = np.squeeze(self.infile[agipd_base + "image/count"])
+            first = np.squeeze(self.infile[agipd_base + "image/first"])
             if np.count_nonzero(count != 0) == 0:
                 raise IOError("File has no valid counts")
             valid = count != 0
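The index_v == 2 branch above derives the validity mask directly from the per-train image counts. Below is a minimal sketch of that idea with hypothetical in-memory arrays; the real code reads count and first from h5py datasets under the file's INDEX section, and the numeric values here are invented purely for illustration.

    import numpy as np

    # Hypothetical INDEX-style arrays: one entry per train.
    count = np.array([0, 64, 64, 0, 32])    # images recorded per train
    first = np.array([0, 0, 64, 128, 128])  # offset of each train's first image

    valid = count != 0                       # trains that actually contain data
    print(valid)                             # [False  True  True False  True]
    print(first[valid], count[valid])        # [  0  64 128] [64 64 32]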
@@ -1137,12 +1137,12 @@ class AgipdCorrections:
             self.valid_indices = np.squeeze(valid_indices).astype(np.int32)
         elif self.index_v == 1:
-            status = np.squeeze(self.infile[idx_base + "image/status"])
+            status = np.squeeze(self.infile[agipd_base + "image/status"])
             if np.count_nonzero(status != 0) == 0:
                 raise IOError("File {} has no valid counts".format(
                     self.infile))
-            last = np.squeeze(self.infile[idx_base + "image/last"])
-            first = np.squeeze(self.infile[idx_base + "image/first"])
+            last = np.squeeze(self.infile[agipd_base + "image/last"])
+            first = np.squeeze(self.infile[agipd_base + "image/first"])
             valid = status != 0
             last_index = int(last[status != 0][-1]) + 1
             first_index = int(first[status != 0][0])
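In the index_v == 1 branch, the usable image range is bounded by the first valid entry of first and the last valid entry of last, with status flagging which entries hold data. A minimal sketch with hypothetical values (the array names mirror the code above, the numbers are invented):

    import numpy as np

    status = np.array([0, 1, 1, 0, 1])       # non-zero where the train has data
    first = np.array([0, 0, 64, 128, 128])   # first image offset per train
    last = np.array([0, 63, 127, 128, 159])  # last image offset per train

    valid = status != 0
    first_index = int(first[status != 0][0])     # -> 0
    last_index = int(last[status != 0][-1]) + 1  # -> 160
    print(first_index, last_index)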
@@ -1281,47 +1281,42 @@ class AgipdCorrections:
         # sanitize indices
         for do in ["image", ]:
-            # uq: INDEX/trainID
-            # fidxv: INDEX/.../image/first idx values
-            # cntsv: INDEX/.../image/counts values
+            # Extract parameters through identifying
+            # unique trains, index and numbers.
             uq, fidxv, cntsv = np.unique(alltrains[firange - firange[0]],
                                          return_index=True,
                                          return_counts=True)
-            # Validate calculated CORR INDEX contents by checking difference between
-            # trainId stored in RAW data and trains from
-            train_diff = np.isin(np.array(self.infile["/INDEX/trainId"]),
-                                 uq, invert=True)
-            # Insert zeros for missing trains.
-            # fidxv and cntsv should have same length as
-            # raw INDEX/.../image/first and INDEX/.../image/count,
-            # respectively
-            for i, diff in enumerate(train_diff):
-                if diff:
-                    if i < len(cntsv):
-                        cntsv = np.insert(cntsv, i, 0)
-                        if i == 0:
-                            fidxv = np.insert(fidxv, i, 0)
-                        else:
-                            fidxv = np.insert(fidxv, i, fidxv[i - 1])
-                    else:
-                        # append if at the end of the array
-                        cntsv = np.append(cntsv, 0)
-                        fidxv = np.append(fidxv, 0)
-            # save INDEX contents (first, count) in CORR files
+            duq = (uq[1:] - uq[:-1]).astype(np.int64)
+            cfidxv = [fidxv[0], ]
+            ccntsv = [cntsv[0], ]
+            for i, du in enumerate(duq.tolist()):
+                if du > 1000:
+                    du = 1
+                    cntsv[i] = 0
+                cfidxv += [0] * (du - 1) + [fidxv[i + 1], ]
+                ccntsv += [0] * (du - 1) + [cntsv[i + 1], ]
+            mv = len(cfidxv)
+            fidx = np.zeros(len(cfidxv), fidxv.dtype)
+            fidx[self.valid[:mv]] = np.array(cfidxv)[self.valid[:mv]]
+            for i in range(len(fidx) - 1, 2, -1):
+                if fidx[i - 1] == 0 and fidx[i] != 0:
+                    fidx[i - 1] = fidx[i]
+            cnts = np.zeros(len(cfidxv), cntsv.dtype)
+            cnts[self.valid[:mv]] = np.array(ccntsv)[self.valid[:mv]]
             self.outfile.create_dataset(idx_base + "{}/first".format(do),
-                                        fidxv.shape,
-                                        dtype=fidxv.dtype,
-                                        data=fidxv,
+                                        fidx.shape,
+                                        dtype=fidx.dtype,
+                                        data=fidx,
                                         fletcher32=True)
             self.outfile.create_dataset(idx_base + "{}/count".format(do),
-                                        cntsv.shape,
-                                        dtype=cntsv.dtype,
-                                        data=cntsv,
+                                        cnts.shape,
+                                        dtype=cnts.dtype,
+                                        data=cnts,
                                         fletcher32=True)

     def create_output_datasets(self):
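The sanitization restored in the last hunk rebuilds per-train first/count INDEX arrays from the per-image train IDs via np.unique with return_index and return_counts, then pads gaps in the train sequence with zero counts (the duq / cfidxv / ccntsv loop). A minimal, self-contained sketch of that pattern follows; the train-ID values are hypothetical, and unlike the real code it does not slice alltrains by firange or mask the result with self.valid.

    import numpy as np

    # Hypothetical per-image train IDs; train 102 is missing from the data.
    alltrains = np.array([100, 100, 100, 101, 101, 103, 103, 103])

    # Per unique train: offset of its first image and its image count.
    uq, fidxv, cntsv = np.unique(alltrains,
                                 return_index=True,
                                 return_counts=True)
    print(uq)     # [100 101 103]
    print(fidxv)  # [0 3 5]
    print(cntsv)  # [3 2 3]

    # Pad gaps in the train-ID sequence with zero counts so the arrays stay
    # aligned with consecutive train IDs, as the restored loop does.
    duq = (uq[1:] - uq[:-1]).astype(np.int64)
    cfidxv = [fidxv[0], ]
    ccntsv = [cntsv[0], ]
    for i, du in enumerate(duq.tolist()):
        cfidxv += [0] * (du - 1) + [fidxv[i + 1], ]
        ccntsv += [0] * (du - 1) + [cntsv[i + 1], ]
    print(cfidxv)  # [0, 3, 0, 5]
    print(ccntsv)  # [3, 2, 0, 3]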