calibration / pycalibration / Commits / f0750b74

Commit f0750b74, authored 3 years ago by Karim Ahmed

    flake8

Parent: ca2c38fb
No related branches found. No related tags found.
1 merge request: !610 Add a pytest to run a dict of CALLAB test runs before releases
Showing 2 changed files, with 336 additions and 350 deletions:

    tests/callab_tests.yaml         319 additions, 327 deletions
    tests/test_pre_deployment.py     17 additions,  23 deletions
tests/callab_tests.yaml  (+319 −327)

    (Diff collapsed; content not shown.)
tests/test_pre_deployment.py  (+17 −23)
"""
[summary]
1. Read a dict of all detectors and calibration types to test.
2. Run xfel-calibrate for each detector and watch the SLURM jobs until it ends.
3. Validate that all SLURM jobs were COMPLETED.
4. Validate the presence of the PDF report.
5. Validate number of generated h5files against reference data.
6. Validate that the h5files are exactly the same against reference data.
7. TODO Validate expected time performace from the time-summary files?
8. Write/Output a report for each test result, by running pytest using --html=<report_name>.html
"""
import
hashlib
import
io
import
multiprocessing
...
...
@@ -18,14 +6,13 @@ import time
 from contextlib import redirect_stdout
 from datetime import datetime
 from subprocess import PIPE, run
-from typing import Any, Dict, List, Tuple
+from typing import Any, Dict, List
 import pytest
 import yaml
 import xfel_calibrate.calibrate as calibrate
 REFERENCE_FOLDER = "/gpfs/exfel/data/scratch/ahmedk/test/remove/pytest"
 OUT_FOLDER = "/gpfs/exfel/data/scratch/ahmedk/test/remove/pytest"
...
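Step 1 of the workflow in the module docstring reads the dict of test runs from tests/callab_tests.yaml, the other file touched by this commit. The loading code itself sits in the collapsed lines; a minimal sketch of how such a dict is typically read with the imported yaml module might look as follows (the variable name callab_test_dict comes from the parametrize call further down; the file location and loading logic are assumptions):

import pathlib
import yaml

# Assumed location: the YAML file lives next to this test module.
with open(pathlib.Path(__file__).parent / "callab_tests.yaml") as fd:
    callab_test_dict = yaml.safe_load(fd)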
@@ -48,7 +35,11 @@ def validate_h5files(f, reference):
     return filemd5(f) == filemd5(reference / f.name), f

-def parse_config(cmd: List[str], config: Dict[str, Any], out_folder: str) -> List[str]:
+def parse_config(
+    cmd: List[str], config: Dict[str, Any], out_folder: str
+) -> List[str]:
     """
     Convert a dictionary to a list of arguments.
     Values that are not strings will be cast.
...
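In this hunk, validate_h5files compares each produced h5 file against the reference file of the same name via an MD5 digest and returns the file alongside the boolean, so failures can be reported per file. The filemd5 helper itself is in the collapsed lines; a plausible sketch, assuming it simply hashes the file contents in chunks, could be:

import hashlib
import pathlib


def filemd5(path: pathlib.Path, chunk_size: int = 1 << 20) -> str:
    """Return the hex MD5 digest of a file, read in chunks."""
    md5 = hashlib.md5()
    with open(path, "rb") as fd:
        for chunk in iter(lambda: fd.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()


def validate_h5files(f: pathlib.Path, reference: pathlib.Path):
    # Compare against the reference file of the same name and also return
    # the file itself, so non-matching files can be listed in the assert.
    return filemd5(f) == filemd5(reference / f.name), f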
@@ -76,6 +67,7 @@ def parse_config(cmd: List[str], config: Dict[str, Any], out_folder: str) -> List[str]:
         cmd += ["--{}".format(key), str(value)]
     return cmd

+@pytest.mark.manual_run
 @pytest.mark.parametrize("calibration_test", list(callab_test_dict.items()))
 def test_xfel_calibrate(calibration_test):
...
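parse_config expands the per-test config dict from callab_tests.yaml into extra command-line arguments for xfel-calibrate. Only the final `cmd += [...]` line and the `return cmd` are visible here; the branches for other value types are collapsed, so the following is only a rough sketch of the idea, with the handling of booleans and sequences (and the unused out_folder parameter) as assumptions:

from typing import Any, Dict, List


def parse_config(
    cmd: List[str], config: Dict[str, Any], out_folder: str
) -> List[str]:
    """Convert a dictionary to a list of arguments (sketch)."""
    # out_folder handling (formatting the output path) is omitted here.
    for key, value in config.items():
        if isinstance(value, bool):
            # Assumed: booleans become bare flags when true, otherwise skipped.
            if value:
                cmd.append(f"--{key}")
        elif isinstance(value, (list, tuple)):
            # Assumed: sequences become one flag followed by each item.
            cmd += [f"--{key}"] + [str(v) for v in value]
        else:
            # This branch matches the line shown in the hunk above.
            cmd += ["--{}".format(key), str(value)]
    return cmd


# Hypothetical example: {"karabo-id": "SPB_DET_AGIPD1M-1", "rel-gain": True}
# would extend cmd with ["--karabo-id", "SPB_DET_AGIPD1M-1", "--rel-gain"].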
@@ -83,9 +75,8 @@ def test_xfel_calibrate(calibration_test):
     test_key, val_dict = calibration_test
     cmd = ["xfel-calibrate", val_dict["det_type"], val_dict["cal_type"]]
     cal_conf = val_dict["config"]
-    cal_conf["report-to"] = f"{test_key}_{datetime.now().strftime('%y%m%d_%H%M%S')}"
-    report_name = test_key
+    report_name = f"{test_key}_{datetime.now().strftime('%y%m%d_%H%M%S')}"
+    cal_conf["report-to"] = report_name
     out_folder = pathlib.Path(cal_conf["out-folder"].format(OUT_FOLDER, cal_conf["karabo-id"], test_key))
...
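With this change the report name, and therefore the report-to option passed to xfel-calibrate, carries a timestamp suffix, so repeated runs of the same test key produce distinct PDF reports. For illustration (the key is hypothetical):

from datetime import datetime

test_key = "agipd-dark"  # hypothetical key from callab_tests.yaml
report_name = f"{test_key}_{datetime.now().strftime('%y%m%d_%H%M%S')}"
# e.g. "agipd-dark_230118_142530", checked later as "agipd-dark_230118_142530.pdf"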
@@ -96,7 +87,7 @@ def test_xfel_calibrate(calibration_test):
     cmd += ["--slurm-name", test_key]
     f = io.StringIO()

-    with redirect_stdout(f):
+    with redirect_stdout(f):
         calibrate.run(cmd)
     out_str = f.getvalue()
...
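The io.StringIO / redirect_stdout pair captures everything calibrate.run prints, so the test can later inspect out_str (for example, for the submitted SLURM job IDs). The same standard-library pattern in isolation, with a stand-in function and an invented message:

import io
from contextlib import redirect_stdout


def noisy():
    # Stand-in for calibrate.run(cmd); the real output format is not shown here.
    print("submitted jobs: 1234567 1234568")


f = io.StringIO()
with redirect_stdout(f):
    noisy()
out_str = f.getvalue()  # holds everything printed inside the with-block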
@@ -116,7 +107,9 @@ def test_xfel_calibrate(calibration_test):
         states = res.stdout.decode().split("\n")[2:-1]

-        if not any(s.strip() in ["COMPLETING", "RUNNING", "CONFIGURING", "PENDING"] for s in states):  # noqa
+        if not any(s.strip() in [
+            "COMPLETING", "RUNNING", "CONFIGURING", "PENDING",
+        ] for s in states):
             slurm_watcher = False
         else:
             time.sleep(0.5)
...
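This loop keeps polling SLURM until none of the test's jobs is still in an active state. The squeue call and its output format are in the collapsed lines (the [2:-1] slice above is tied to that format); a self-contained sketch of such a poll, assuming squeue is filtered by the job name set via --slurm-name and asked for the state column only, might be:

import time
from subprocess import PIPE, run

test_key = "agipd-dark"  # hypothetical; jobs were submitted with --slurm-name test_key

slurm_watcher = True
while slurm_watcher:
    # Assumed invocation: state column only, one line per job, plus a header.
    res = run(["squeue", "--name", test_key, "--format=%T"], stdout=PIPE)
    states = [s.strip() for s in res.stdout.decode().split("\n")[1:] if s.strip()]
    if not any(s in ["COMPLETING", "RUNNING", "CONFIGURING", "PENDING"] for s in states):
        slurm_watcher = False  # no job of this test is still active
    else:
        time.sleep(0.5)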
@@ -127,7 +120,7 @@ def test_xfel_calibrate(calibration_test):
     # 2nd check for report availability.
     report_file = out_folder / f"{report_name}.pdf"
-    assert report_file.exists(), f"{test_key} failure, report doesn't exists."
+    assert report_file.exists(), f"{test_key} failure, report doesn't exists."  # noqa
     # 3rd Check number of produced h5 files.
     h5files = list(out_folder.glob("*.h5"))
...
@@ -135,7 +128,8 @@ def test_xfel_calibrate(calibration_test):
     assert len(h5files) == len(expected_h5files), f"{test_key} failure, number of files are not as expected."  # noqa
     print(f"{test_key}'s calibration h5files numbers are as expected.")
-    # 4th check that h5files are exactly the same as the reference h5files.
+    # 4th check that h5files are exactly the same as
+    # the reference h5files.
     all_files_validated = []
     non_valid_files = []
     with multiprocessing.Pool() as pool:
...
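The multiprocessing.Pool block (its map call is collapsed) fans the per-file MD5 comparison out over several worker processes; the final hunk below then collects the boolean and the offending file from each result tuple. A sketch of how that fan-out could look, assuming starmap over (file, reference folder) pairs and simplified paths:

import multiprocessing
import pathlib

# Simplified: the real test builds these paths per karabo-id / test key.
out_folder = pathlib.Path(OUT_FOLDER)
reference_folder = pathlib.Path(REFERENCE_FOLDER)
h5files = list(out_folder.glob("*.h5"))

all_files_validated = []
non_valid_files = []
with multiprocessing.Pool() as pool:
    # validate_h5files returns (files_match, file) for every produced h5 file.
    result = pool.starmap(
        validate_h5files, [(h5file, reference_folder) for h5file in h5files])

for i in result:
    all_files_validated.append(i[0])  # assumed: collect the per-file booleans
    if not i[0]:
        non_valid_files.append(i[1])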
@@ -146,4 +140,4 @@ def test_xfel_calibrate(calibration_test):
         if not i[0]:
             non_valid_files.append(i[1])
     assert all(all_files_validated), f"{test_key} failure, while validating {non_valid_files}"  # noqa
-    print(f"{test_key}'s calibration h5files are validated successfully.")
+    print(f"{test_key}'s calibration h5files are validated successfully.")  # noqa