calibration / pycalibration

Commit 444597eb
authored 3 years ago by Karim Ahmed

remove asyncio
parent 86851cb4
No related merge requests found
Changes: 1
Showing 1 changed file: tests/test_pre_deployment.py (+11 additions, −25 deletions)
"""
1- RUN AGIPD xfel-calibrate (Run multiple threads/asyncio to run for different detectors/calibration-types)
2- AWAIT the slurm jobs to finish
3- DO healthy checks:
a- Check available report.
b- Check no errors in the rst files/slurm status.
c- Check number of corrected files? local constant files?
d- check expected time performance from time-summary files??
e- Copy latest out-data for comparison and validity check after ending the test???
"""
# 04.01.2022
"""
[summary]
"""
[summary]
1. Read a dict of all detectors and calibration types to test.
1. Read a dict of all detectors and calibration types to test.
2. Run xfel-calibrate for each detector and watch the SLURM jobs until it ends
(TODO Asyncio/Background threads??)
.
2. Run xfel-calibrate for each detector and watch the SLURM jobs until it ends.
3. Validate that all SLURM jobs were COMPLETED.
3. Validate that all SLURM jobs were COMPLETED.
4. Validate the presence of the report within the last 10 seconds
4. Validate the presence of the PDF report.
(TODO: Slurm folder and calibration metadata as well?)
5. Validate number of generated h5files against reference data.
5. Validate number of generated h5files.
6. Validate that the h5files are exactly the same against reference data.
6. Validate that the h5files are exactly the same.
7. TODO Validate expected time performace from the time-summary files?
7. TODO Validate expected time performace from the time-summary files?
8. Write/Output a report for each test result
? [HTML pytest output?]
8. Write/Output a report for each test result
, by running pytest using --html=<report_name>.html
"""
"""
-import asyncio
 import hashlib
 import io
+import multiprocessing
 import pathlib
 import time
 from contextlib import redirect_stdout
 from subprocess import PIPE, run
+from typing import Any, Dict, List

 import pytest
 import yaml
-from typing import List, Any, Dict

 import xfel_calibrate.calibrate as calibrate

 with open("./callab_tests.yaml", "r") as f:
     callab_test_dict = yaml.load(f)

-import multiprocessing
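One editorial note on the imports above: yaml.load(f) is called without an explicit Loader, which PyYAML 5.1 and later flags with a warning. The conventional spelling for a plain configuration file is yaml.safe_load, as in this minimal sketch (not part of this commit):

import yaml

# safe_load avoids the Loader warning and restricts parsing to plain YAML types.
with open("./callab_tests.yaml", "r") as f:
    callab_test_dict = yaml.safe_load(f)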
 def validate_h5files(f, reference):
     def filemd5(filename, block_size=2**20):
...
@@ -56,6 +42,7 @@ def validate_h5files(f, reference):
     return filemd5(f) == filemd5(reference / f.name), f
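The validation helper shown above compares each produced HDF5 file against the file of the same name in a reference directory by MD5 digest and returns a (matches, file) tuple. A self-contained sketch of that idea, flattened here for readability and assuming pathlib.Path inputs (the function in this file may differ in detail):

import hashlib
import pathlib


def filemd5(filename, block_size=2**20):
    """MD5 of a file, read in 1 MiB blocks so large HDF5 files fit in memory."""
    md5 = hashlib.md5()
    with open(filename, "rb") as fh:
        while True:
            block = fh.read(block_size)
            if not block:
                break
            md5.update(block)
    return md5.hexdigest()


def validate_h5files(f: pathlib.Path, reference: pathlib.Path):
    """Compare a file against the same-named file under the reference folder."""
    return filemd5(f) == filemd5(reference / f.name), f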
 def parse_config(cmd: List[str], config: Dict[str, Any]) -> List[str]:
     """
     Convert a dictionary to a list of arguments.
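Only the signature and the first docstring line of parse_config are visible in this diff: it turns a {parameter: value} dictionary into additional command-line arguments appended to cmd. The sketch below shows what such a conversion typically looks like; the flag spelling and type handling are assumptions, not taken from this commit:

from typing import Any, Dict, List


def parse_config(cmd: List[str], config: Dict[str, Any]) -> List[str]:
    """Convert a dictionary to a list of arguments (illustrative sketch)."""
    for key, value in config.items():
        if isinstance(value, bool):
            # Booleans become bare flags when true.
            if value:
                cmd.append(f"--{key}")
        elif isinstance(value, (list, tuple)):
            cmd.append(f"--{key}")
            cmd += [str(v) for v in value]
        else:
            cmd += [f"--{key}", str(value)]
    return cmd

Under that reading, an entry such as {"run": 123} would contribute "--run 123" to the assembled xfel-calibrate command.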
@@ -84,7 +71,6 @@ def parse_config(cmd: List[str], config: Dict[str, Any]) -> List[str]:
 @pytest.mark.manual_run
 @pytest.mark.parametrize("calibration_test", list(callab_test_dict.items()))
-@pytest.mark.asyncio
 def test_xfel_calibrate(calibration_test):
     test_key, val_dict = calibration_test
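The test is parametrized over the entries of callab_tests.yaml, so each (test_key, val_dict) pair becomes one pytest case; dropping @pytest.mark.asyncio turns it into an ordinary synchronous test. A minimal, self-contained illustration of that pattern, with a made-up stand-in for the YAML dict:

import pytest

# Stand-in for the dict loaded from callab_tests.yaml (keys and values invented here).
callab_test_dict = {
    "AGIPD-CORRECT": {"detector": "AGIPD", "cal_type": "CORRECT"},
}


@pytest.mark.manual_run
@pytest.mark.parametrize("calibration_test", list(callab_test_dict.items()))
def test_xfel_calibrate(calibration_test):
    test_key, val_dict = calibration_test
    assert isinstance(val_dict, dict)

The manual_run marker would need to be registered in the pytest configuration to avoid unknown-marker warnings, and with the pytest-html plugin installed, running pytest with --html=<report_name>.html produces the HTML report mentioned in step 8.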
@@ -148,4 +134,4 @@ def test_xfel_calibrate(calibration_test):
         if i[0]:
             non_valid_files.append(i[1])
     assert all(all_files_validated), f"{test_key} failure, while validating {non_valid_files}"  # noqa
     print(f"{test_key}'s calibration h5files are validated successfully.")
\ No newline at end of file
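The tail of the test (partially collapsed above) gathers the (matches, file) tuples returned by validate_h5files and asserts that every file matched its reference. Since import asyncio is gone and multiprocessing moved into the main import block, a plausible shape for that fan-out is a process pool. The sketch below is an assumption about intent, not the commit's actual code; note that it collects non_valid_files when a file does not validate, whereas the hunk above shows "if i[0]:":

import hashlib
import multiprocessing
import pathlib
from functools import partial


def filemd5(filename: pathlib.Path) -> str:
    # Simplified whole-file read; the block-wise variant above suits large files better.
    return hashlib.md5(filename.read_bytes()).hexdigest()


def validate_h5files(f: pathlib.Path, reference: pathlib.Path):
    return filemd5(f) == filemd5(reference / f.name), f


def validate_outputs(out_folder: pathlib.Path, reference_folder: pathlib.Path):
    """Fan the per-file check out over a process pool and assert on the result."""
    h5files = sorted(out_folder.glob("*.h5"))
    with multiprocessing.Pool() as pool:
        results = pool.map(partial(validate_h5files, reference=reference_folder), h5files)

    all_files_validated = [ok for ok, _ in results]
    non_valid_files = [f for ok, f in results if not ok]
    assert all(all_files_validated), f"failure, while validating {non_valid_files}"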