Commit 334a3535
authored 3 years ago by Karim Ahmed

add CL options for release_test

parent 0c31e162
Part of 1 merge request: !610 Add a pytest to run a dict of CALLAB test runs before releases
Changes: 2 changed files with 80 additions and 5 deletions

tests/conftest.py             43 additions, 0 deletions
tests/test_pre_deployment.py  37 additions, 5 deletions
tests/conftest.py  (+43 −0)
@@ -56,6 +56,34 @@ def pytest_addoption(parser):
         default=False,
         help="Start release test for all supported calibration notebooks",
     )
+    detectors_choices = []
+    for k in ['agipd', 'jungfrau', 'pnccd', 'epix100', 'all']:
+        detectors_choices += [k, k.upper()]
+    parser.addoption(
+        "--detectors",
+        action='append',
+        choices=detectors_choices,
+    )
+    parser.addoption(
+        "--calibration",
+        type=str,
+        choices=['correct', 'dark', 'all'],
+        default='all',
+    )
+    parser.addoption(
+        "--no-numerical-validation",
+        action="store_true",
+        default=False,
+        help="Skips tests for numerical validation for produced h5files.",
+    )
+    parser.addoption(
+        "--picked-test",
+        type=str,
+        default=None,
+    )


 def pytest_configure(config):
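The options registered above are passed on the pytest command line, e.g. something like pytest tests/test_pre_deployment.py --detectors agipd --calibration dark, together with the release-test flag defined just before this hunk. Note that --detectors uses action='append': the flag may be repeated and the collected value is a list, and it stays None when the flag is never given since no default is set. A self-contained sketch of the same option semantics using plain argparse, which pytest's Parser.addoption largely mirrors (illustrative only, not part of this commit):

import argparse

# Illustrative only: mimics the semantics of the options added above.
parser = argparse.ArgumentParser()
parser.add_argument("--detectors", action="append",
                    choices=["agipd", "AGIPD", "jungfrau", "JUNGFRAU",
                             "pnccd", "PNCCD", "epix100", "EPIX100",
                             "all", "ALL"])
parser.add_argument("--calibration", choices=["correct", "dark", "all"],
                    default="all")
parser.add_argument("--no-numerical-validation", action="store_true",
                    default=False)
parser.add_argument("--picked-test", type=str, default=None)

# Repeating --detectors accumulates values; --calibration falls back to "all".
args = parser.parse_args(["--detectors", "agipd", "--detectors", "JUNGFRAU"])
assert args.detectors == ["agipd", "JUNGFRAU"]
assert args.calibration == "all" and args.picked_test is None

# When --detectors is never passed, the attribute is None rather than ["all"].
assert parser.parse_args([]).detectors is None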
@@ -79,6 +107,21 @@ def pytest_configure(config):
         "manual_run(): marks skips for tests that required to be run manually",
     )
+    config.addinivalue_line(
+        "markers",
+        "manual_run(): marks skips for tests that required to be run manually",
+    )
+
+
+@pytest.fixture
+def release_test_config(request):
+    detectors = request.config.getoption("--detectors")
+    calibration = request.config.getoption("--calibration")
+    skip_numerical_validation = request.config.getoption(
+        "--no-numerical-validation")
+    picked_test = request.config.getoption("--picked-test")
+    return detectors, calibration, picked_test, skip_numerical_validation


 @lru_cache()
 def server_reachable(server: str = "max-exfl-cal002"):
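Only the cached signature of server_reachable is visible in this hunk; its body lies outside the diff. Purely as an illustration of what such a reachability probe might look like, here is a minimal socket-based sketch; the port, timeout and overall approach are assumptions rather than the project's actual implementation:

import socket
from functools import lru_cache


@lru_cache()
def server_reachable(server: str = "max-exfl-cal002",
                     port: int = 22, timeout: float = 2.0) -> bool:
    # Illustrative sketch only: try to open a TCP connection to the server
    # (port and timeout are assumed values) and report success or failure.
    try:
        with socket.create_connection((server, port), timeout=timeout):
            return True
    except OSError:
        return False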
tests/test_pre_deployment.py  (+37 −5)
@@ -106,12 +106,38 @@ def parse_config(
 @pytest.mark.manual_run
-@pytest.mark.parametrize("calibration_test", list(callab_test_dict.items()))
-def test_xfel_calibrate(calibration_test):
-    test_key, val_dict = calibration_test
+@pytest.mark.parametrize(
+    "k, v",
+    list(callab_test_dict.items()),
+    ids=list(callab_test_dict.keys()),
+)
+def test_xfel_calibrate(k, v, release_test_config):
+    (
+        detectors, calibration, picked_test, skip_numerical_validation
+    ) = release_test_config
+
+    test_key, val_dict = k, v
     cal_type = val_dict["cal_type"]
-    cmd = ["xfel-calibrate", val_dict["det_type"], cal_type]
+    det_type = val_dict["det_type"]
+
+    if picked_test is None:
+        # Skip non-selected detectors
+        if (
+            detectors != ["all"] and
+            det_type.lower() not in [d.lower() for d in detectors]
+        ):
+            pytest.skip()
+        # Skip non-selected calibration
+        if calibration != "all" and cal_type.lower() != calibration:
+            pytest.skip()
+    else:
+        if test_key != picked_test:
+            pytest.skip()
+
+    cmd = ["xfel-calibrate", det_type, cal_type]
     cal_conf = val_dict["config"]

     report_name = f"{test_key}_{datetime.now().strftime('%y%m%d_%H%M%S')}"
     cal_conf["report-to"] = report_name
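The skip logic above decides, for every parametrized CALLAB case, whether it runs under the given command-line options: --picked-test selects a single test key, otherwise --detectors and --calibration filter by detector and calibration type. Restated as a standalone predicate purely for illustration (not code from the commit), the behaviour is easy to exercise:

# Illustrative restatement of the skip conditions above, not project code.
def should_run(test_key, det_type, cal_type, detectors, calibration, picked_test):
    if picked_test is not None:
        return test_key == picked_test
    if detectors != ["all"] and det_type.lower() not in [d.lower() for d in detectors]:
        return False
    if calibration != "all" and cal_type.lower() != calibration:
        return False
    return True


# Hypothetical test keys and detector names, used only to exercise the predicate.
assert should_run("agipd-dark", "AGIPD", "dark", ["all"], "all", None)
assert should_run("agipd-dark", "AGIPD", "dark", ["agipd"], "dark", None)
assert not should_run("agipd-dark", "AGIPD", "dark", ["jungfrau"], "all", None)
assert not should_run("agipd-dark", "AGIPD", "dark", ["all"], "correct", None)
assert not should_run("agipd-dark", "AGIPD", "dark", ["all"], "all", "other-key")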
@@ -122,6 +148,7 @@ def test_xfel_calibrate(calibration_test):
     reference_folder = pathlib.Path(val_dict["reference-folder"].format(
         REFERENCE_FOLDER, cal_conf["karabo-id"], test_key))

     cmd += ["--slurm-name", test_key]
     f = io.StringIO()
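val_dict["reference-folder"] is evidently a template string filled positionally with the base reference folder, the karabo-id and the test key. A minimal illustration of that pattern with hypothetical values (the real template, folder and karabo-id come from the repository and are not shown here):

import pathlib

# Hypothetical template and values, only to illustrate the positional .format call.
REFERENCE_FOLDER = "/path/to/reference_folder"
val_dict = {"reference-folder": "{}/{}/{}"}
cal_conf = {"karabo-id": "SPB_DET_AGIPD1M-1"}
test_key = "agipd-dark"

reference_folder = pathlib.Path(val_dict["reference-folder"].format(
    REFERENCE_FOLDER, cal_conf["karabo-id"], test_key))
assert reference_folder == pathlib.Path(
    "/path/to/reference_folder/SPB_DET_AGIPD1M-1/agipd-dark")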
@@ -149,6 +176,7 @@ def test_xfel_calibrate(calibration_test):
             slurm_watcher = False
         else:
             time.sleep(0.5)

     # 1st check that all jobs were COMPLETED without errors.
     states = res.stdout.decode().split("\n")[2:-1]
     assert all(s.strip() == "COMPLETED" for s in states), f"{test_key} failure, calibration jobs were not completed. {jobids}: {states}"  # noqa
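Both res and jobids are produced outside this hunk. A query of roughly this shape, assuming Slurm's sacct is used (the test's actual command may differ), would explain why the parsing above drops two header rows and the trailing empty string:

import subprocess


def slurm_job_states(jobids):
    # Hypothetical helper, not project code: ask sacct for the state of each
    # submitted job. Default sacct output starts with a header row and a
    # separator row, and splitting the decoded stdout on "\n" leaves a
    # trailing empty string, which matches the [2:-1] slice seen above.
    res = subprocess.run(
        ["sacct", "-j", ",".join(jobids), "--format=State"],
        capture_output=True, check=True,
    )
    return [s.strip() for s in res.stdout.decode().split("\n")[2:-1]]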
@@ -164,6 +192,10 @@ def test_xfel_calibrate(calibration_test):
     assert len(h5files) == len(expected_h5files), f"{test_key} failure, number of files are not as expected."  # noqa
     print(f"{test_key}'s calibration h5files numbers are as expected.")

+    # Stop tests at this point, if desired.
+    if skip_numerical_validation:
+        return
+
     # 4th check that h5files are exactly the same as the reference h5files.
     all_files_validated = []
     non_valid_files = []
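The numerical validation that --no-numerical-validation now bypasses lives outside this hunk. Only as a generic illustration (not the project's validation routine), a byte-for-byte comparison of a produced h5 file against its reference copy could look like this:

import hashlib
from pathlib import Path


def files_identical(produced: Path, reference: Path) -> bool:
    # Generic illustration, not project code: compare two files via SHA-256
    # digests, reading in chunks so large h5 files are not loaded into memory.
    def digest(path: Path) -> str:
        h = hashlib.sha256()
        with open(path, "rb") as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest()

    return digest(produced) == digest(reference)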