Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • calibration/pycalibration
1 result
Show changes
Commits on Source (16)
Showing
with 1155 additions and 311 deletions
......@@ -21,6 +21,7 @@
LPD/results
Test
build
coverage.*
docs/build
docs/source/_notebooks
docs/source/_static/reports
......
......@@ -43,7 +43,12 @@ pytest:
<<: *before_script
script:
- python3 -m pip install ".[test]"
- python3 -m pytest --verbose --cov=cal_tools --cov=xfel_calibrate
- python3 -m pytest --color yes --verbose --cov=cal_tools --cov=xfel_calibrate
# Nope... https://docs.gitlab.com/12.10/ee/user/project/merge_requests/test_coverage_visualization.html#enabling-the-feature
# - coverage xml
# artifacts:
# reports:
# cobertura: coverage.xml
cython-editable-install-test:
stage: test
......@@ -51,4 +56,4 @@ cython-editable-install-test:
<<: *before_script
script:
- python3 -m pip install -e ".[test]"
- python3 -m pytest ./tests/test_agipdalgs.py
- python3 -m pytest --color yes --verbose ./tests/test_agipdalgs.py
%% Cell type:markdown id: tags:
# Test Notebook - CLI
Author: Robert Rosca
Version: 0.1
Notebook for use with the unit and continuous integration tests.
%% Cell type:code id: tags:
```
in_folder = "/dev/null" # input folder
out_folder = "/dev/null" # output folder
list_normal = [10] # parameterized list, range allowed
list_intellilist = [2345] # parameterized list with ranges, range allowed
concurrency_parameter = [1] # concurrency parameter, range allowed
number = 0 # parameterized number
```
%% Cell type:markdown id: tags:
Tests notebook execution by just creating an empty file in the output directory.
%% Cell type:code id: tags:
```
from pathlib import Path
```
%% Cell type:code id: tags:
```
in_folder = Path(in_folder)
(in_folder / "touch").touch()
```
......@@ -4,6 +4,12 @@ requires = ["cython==0.29.21", "numpy==1.19.1", "setuptools>=40.8.0", "wheel"]
[tool.isort]
profile = "black"
[tool.pylint.messages_control]
disable = "C0330, C0326"
[tool.pylint.format]
max-line-length = "88"
[tool.pytest.ini_options]
norecursedirs = [
"legacy",
......@@ -15,5 +21,11 @@ norecursedirs = [
"dist",
"node_modules",
"venv",
"{arch}"
"{arch}",
]
required_plugins = [
"pytest-asyncio",
"pytest-cov",
"pytest-subprocess",
]
......@@ -74,14 +74,14 @@ setup(
ext_modules=cythonize(ext_modules),
install_requires=[
"iCalibrationDB @ git+ssh://git@git.xfel.eu:10022/detectors/cal_db_interactive.git@2.0.5", # noqa
"nbparameterise @ git+ssh://git@git.xfel.eu:10022/detectors/nbparameterise.git@0.3", # noqa
"XFELDetectorAnalysis @ git+ssh://git@git.xfel.eu:10022/karaboDevices/pyDetLib.git@2.5.6-2.10.0#subdirectory=lib", # noqa
"pasha @ git+https://github.com/European-XFEL/pasha.git@0.1.0",
"Cython==0.29.21",
"Jinja2==2.11.2",
"astcheck==0.2.5",
"astsearch==0.1.3",
"dill==0.3.0",
"extra_data==1.2.0",
"extra_data==1.4.1",
"extra_geom==1.1.1",
"fabio==0.9.0",
"gitpython==3.1.0",
......@@ -101,6 +101,7 @@ setup(
"nbclient==0.5.1",
"nbconvert==5.6.1",
"nbformat==5.0.7",
"nbparameterise==0.5",
"notebook==6.1.5",
"numpy==1.19.1",
"prettytable==0.7.2",
......@@ -125,6 +126,7 @@ setup(
"nbval",
"pytest-asyncio",
"pytest-cov",
"pytest-subprocess",
"pytest>=5.4.0",
"testpath",
"unittest-xml-reporting==3.0.2",
......
......@@ -15,16 +15,28 @@ from pathlib import Path
from subprocess import DEVNULL, check_output
from typing import List, Union
import cal_tools.tools
import nbconvert
import nbformat
import numpy as np
from jinja2 import Template
from nbparameterise import extract_parameters, parameter_values, replace_definitions
import cal_tools.tools
from .finalize import tex_escape
from .notebooks import notebooks
from .settings import *
from .settings import (
default_report_path,
free_nodes_cmd,
launcher_command,
max_reserved,
preempt_nodes_cmd,
python_path,
reservation,
reservation_char,
sprof,
temp_path,
try_report_to_output,
)
PKG_DIR = os.path.dirname(os.path.abspath(__file__))
......@@ -115,7 +127,6 @@ def make_intelli_list(ltype):
super(IntelliListAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
parsed_values = []
values = ",".join(values)
if isinstance(values, str):
......@@ -171,7 +182,7 @@ def extract_title_author_version(nb):
# In case of a standard installation a version is stored
# in the _version.py file
try:
git_dir = os.path.join(PKG_DIR, '..', '.git')
git_dir = os.path.join(PKG_DIR, '..', '..', '.git')
version = check_output([
'git', f'--git-dir={git_dir}', 'describe', '--tag',
], stderr=DEVNULL).decode('utf8')
......@@ -284,8 +295,7 @@ def balance_sequences(in_folder: str, run: int, sequences: List[int],
if isinstance(karabo_da, str):
karabo_da = [karabo_da]
elif not isinstance(karabo_da, list):
raise ValueError("Balance sequences expects "
"karabo_da as a string or list.")
raise TypeError("Balance sequences expects `karabo_da` as a string or list.")
in_path = Path(in_folder, f"r{run:04d}")
......@@ -305,8 +315,10 @@ def balance_sequences(in_folder: str, run: int, sequences: List[int],
if sequences != [-1]:
seq_nums = sorted(seq_nums.intersection(sequences))
if len(seq_nums) == 0:
raise ValueError(f"Selected sequences {sequences} are not "
f"available in {in_path}")
raise ValueError(
f"Selected sequences {sequences} are not "
f"available in {in_path}"
)
# Validate required nodes with max_nodes
nsplits = len(seq_nums) // sequences_per_node
......@@ -332,6 +344,7 @@ def make_extended_parser() -> argparse.ArgumentParser:
try:
det_notebooks = notebooks[detector]
except KeyError:
# TODO: This should really go to stderr not stdout
print("Not one of the known detectors: {}".format(notebooks.keys()))
sys.exit(1)
......@@ -394,7 +407,7 @@ def add_args_from_nb(nb, parser, cvar=None, no_required=False):
:param bool no_required: If True, none of the added options are required.
"""
parser.description = make_epilog(nb)
parms = extract_parameters(nb, lang='python3')
parms = extract_parameters(nb, lang='python')
for p in parms:
......@@ -663,7 +676,7 @@ def concurrent_run(temp_path, nb, nbname, args, cparm=None, cval=None,
suffix = flatten_list(cval)
# first convert the notebook
parms = extract_parameters(nb, lang='python3')
parms = extract_parameters(nb, lang='python')
if has_parm(parms, "cluster_profile"):
cluster_profile = f"{args['cluster_profile']}_{suffix}"
......@@ -673,7 +686,7 @@ def concurrent_run(temp_path, nb, nbname, args, cparm=None, cval=None,
params = parameter_values(parms, **args)
params = parameter_values(params, cluster_profile=cluster_profile)
new_nb = replace_definitions(nb, params, execute=False, lang='python3')
new_nb = replace_definitions(nb, params, execute=False, lang='python')
if not show_title:
first_markdown_cell(new_nb).source = ''
set_figure_format(new_nb, args["vector_figs"])
......@@ -682,9 +695,7 @@ def concurrent_run(temp_path, nb, nbname, args, cparm=None, cval=None,
os.path.basename(base_name), cparm, suffix)
nbpath = os.path.join(temp_path, new_name)
with open(nbpath, "w") as f:
f.write(nbconvert.exporters.export(
nbconvert.NotebookExporter, new_nb)[0])
nbformat.write(new_nb, nbpath)
# add finalization to the last job
if final_job:
......@@ -832,7 +843,7 @@ def run():
if ext_func is not None:
extend_params(nb, ext_func)
parms = extract_parameters(nb, lang='python3')
parms = extract_parameters(nb, lang='python')
title, author, version = extract_title_author_version(nb)
......
......@@ -233,4 +233,14 @@ notebooks = {
"cluster cores": 16},
},
},
"TEST": {
"TEST-CLI": {
"notebook": "notebooks/test/test-cli.ipynb",
"concurrency": {
"parameter": "concurrency_parameter",
"default concurrency": None,
"cluster cores": 1,
},
},
},
}
......@@ -17,7 +17,7 @@ try_report_to_output = True
logo_path = "xfel.pdf"
# the command to run this concurrently. It is prepended to the actual call
sprof = os.environ.get("XFELCALSLURM", "exfel")
sprof = os.environ.get("XFELCALSLURM", "exfel") # TODO: https://git.xfel.eu/gitlab/calibration/planning/issues/3
launcher_command = "sbatch -t 24:00:00 --requeue --output {temp_path}/slurm-%j.out"
free_nodes_cmd = "sinfo -p exfel -t idle -N --noheader | wc -l"
preempt_nodes_cmd = "squeue -p all,grid --noheader | grep max-exfl | egrep -v 'max-exfl18[3-8]|max-exfl100|max-exflg' | wc -l"
......
......@@ -10,7 +10,7 @@
\end{figure}
\vspace{0mm}
\Large \textbf{ Detector group }
\Large \textbf{ Data department }
Based on data sample: {{ data_path }}
......
from pathlib import Path
import pytest
def pytest_addoption(parser):
    """Register the ``--no-gpfs`` command line flag with pytest.

    Passing ``--no-gpfs`` makes ``pytest_runtest_setup`` skip every test
    marked with ``requires_gpfs``.
    """
    parser.addoption(
        "--no-gpfs",
        action="store_true",
        # Must be the boolean False: the previous default, the string
        # "false", is truthy, which made GPFS-marked tests skip even when
        # the flag was not passed.
        default=False,
        help="Skips tests marked as requiring GPFS access",
    )
def pytest_configure(config):
    """Declare the custom ``requires_gpfs`` marker so pytest recognises it."""
    marker_spec = (
        "markers",
        "requires_gpfs(): marks skips for tests that require GPFS access",
    )
    config.addinivalue_line(*marker_spec)
def pytest_runtest_setup(item):
    """Skip ``requires_gpfs``-marked tests when GPFS is unavailable or disabled.

    A test is skipped if it carries the ``requires_gpfs`` marker and either
    the ``/gpfs`` mount point does not exist or the ``--no-gpfs`` command
    line option was given.
    """
    if list(item.iter_markers(name="requires_gpfs")) and (
        not Path("/gpfs").is_dir() or item.config.getoption("--no-gpfs")
    ):
        pytest.skip("gpfs not available")
......@@ -21,6 +21,7 @@ def test_show_processed_modules():
assert 'LDP' in err.value()
@pytest.mark.requires_gpfs
def test_dir_creation_date():
folder = '/gpfs/exfel/exp/CALLAB/202031/p900113/raw'
......@@ -54,46 +55,48 @@ cal_db_interface = "tcp://max-exfl017:8020"
def test_get_pdu_from_db():
snapshot_at = "2021-05-06 00:20:10.00"
# A karabo_da str returns a list of one element.
pdu_dict = get_pdu_from_db(karabo_id="TEST_DET_CI-2",
karabo_da="TEST_DAQ_DA_01",
pdu_dict = get_pdu_from_db(karabo_id="TEST_DET_CAL_CI-1",
karabo_da="TEST_DET_CAL_DA0",
constant=constant,
condition=condition,
cal_db_interface=cal_db_interface,
snapshot_at="2021-03-01 09:44:00+00:00",
snapshot_at=snapshot_at,
timeout=30000)
assert len(pdu_dict) == 1
assert pdu_dict[0] == 'PHYSICAL_DETECTOR_UNIT-1_DO_NOT_DELETE'
assert pdu_dict[0] == "CAL_PHYSICAL_DETECTOR_UNIT-1_TEST"
# A list of karabo_das to return their PDUs, if available.
pdu_dict = get_pdu_from_db(karabo_id="TEST_DET_CI-2",
karabo_da=["TEST_DAQ_DA_01", "TEST_DAQ_DA_02",
pdu_dict = get_pdu_from_db(karabo_id="TEST_DET_CAL_CI-1",
karabo_da=["TEST_DET_CAL_DA0", "TEST_DET_CAL_DA1",
"UNAVAILABLE_DA"],
constant=constant,
condition=condition,
cal_db_interface=cal_db_interface,
snapshot_at="2021-03-01 09:44:00+00:00",
snapshot_at=snapshot_at,
timeout=30000)
assert pdu_dict == ['PHYSICAL_DETECTOR_UNIT-1_DO_NOT_DELETE',
'PHYSICAL_DETECTOR_UNIT-2_DO_NOT_DELETE',
assert pdu_dict == ["CAL_PHYSICAL_DETECTOR_UNIT-1_TEST",
"CAL_PHYSICAL_DETECTOR_UNIT-2_TEST",
None]
# "all" is used to return all corresponding units for a karabo_id.
pdu_dict = get_pdu_from_db(karabo_id="TEST_DET_CI-2",
pdu_dict = get_pdu_from_db(karabo_id="TEST_DET_CAL_CI-1",
karabo_da="all",
constant=constant,
condition=condition,
cal_db_interface="tcp://max-exfl017:8020",
snapshot_at="2021-03-01 09:44:00+00:00",
cal_db_interface=cal_db_interface,
snapshot_at=snapshot_at,
timeout=30000)
assert len(pdu_dict) == 3
assert pdu_dict == ['PHYSICAL_DETECTOR_UNIT-1_DO_NOT_DELETE',
'PHYSICAL_DETECTOR_UNIT-2_DO_NOT_DELETE',
'PHYSICAL_DETECTOR_UNIT-3_DO_NOT_DELETE']
assert len(pdu_dict) == 2
assert pdu_dict == ["CAL_PHYSICAL_DETECTOR_UNIT-1_TEST",
"CAL_PHYSICAL_DETECTOR_UNIT-2_TEST"]
@pytest.mark.requires_gpfs
def test_initialize_from_db():
creation_time = datetime.strptime("2020-01-07 13:26:48.00",
"%Y-%m-%d %H:%M:%S.%f")
......
import pytest
from xfel_calibrate.calibrate import balance_sequences
def test_balance_sequences():
    """Exercise ``balance_sequences`` against a known CALLAB raw run.

    Requires GPFS access (reads /gpfs/exfel/exp/CALLAB/202031/p900113/raw).
    """
    in_folder = "/gpfs/exfel/exp/CALLAB/202031/p900113/raw"

    ret = balance_sequences(in_folder=in_folder,
                            run=9992, sequences=[0, 2, 5, 10, 20, 50, 100],
                            sequences_per_node=1, karabo_da=["all"],
                            max_nodes=8)
    expected = [[0], [2]]
    assert expected == ret

    ret = balance_sequences(in_folder=in_folder,
                            run=9992, sequences=[-1],
                            sequences_per_node=1, karabo_da=["JNGFR01"],
                            max_nodes=3)
    expected = []
    assert expected == ret

    with pytest.raises(ValueError) as e:
        balance_sequences(in_folder=in_folder,
                          run=9992, sequences=[1991, 2021],
                          sequences_per_node=1, karabo_da=["all"],
                          max_nodes=3)
    # `e.value` is the raised exception object (not callable); compare against
    # its string form.
    assert 'Selected sequences [1991, 2021]' in str(e.value)

    # A non-str/non-list karabo_da now raises TypeError (see balance_sequences).
    with pytest.raises(TypeError) as e:
        balance_sequences(in_folder=in_folder,
                          run=9992, sequences=[1991, 2021],
                          sequences_per_node=1, karabo_da=-1,
                          max_nodes=3)
    assert '`karabo_da` as a string or list' in str(e.value)
......@@ -9,6 +9,7 @@ sys.path.insert(0, Path(__file__).parent / 'webservice')
from webservice.webservice import check_files, merge, parse_config, wait_on_transfer
@pytest.mark.requires_gpfs
def test_check_files():
in_folder = '/gpfs/exfel/exp/CALLAB/202031/p900113/raw'
runs = [9985, 9984]
......
# pylint: disable=missing-module-docstring
import sys
from pathlib import Path
from typing import Callable, Dict, List, NamedTuple, Optional, Union
from unittest import mock
import extra_data.tests.make_examples as make_examples
from pytest_subprocess import FakeProcess
from xfel_calibrate import calibrate, settings
class FakeProcessCalibrate(FakeProcess):
    """Class used to create a fake process which will mock calls to processes
    expected to be called by pycalibration
    """

    def __init__(self):
        """Sets up fake process for use with xfel calibrate

        Fakes:
        - Slurm free nodes call
        - Slurm preempt nodes call
        - Slurm sbatch call
        - Git describe calls (for pulling repo version)

        Expected use is something like:

        ```
        @pytest.fixture(scope="class", autouse=True)
        def fake_process_calibrate(self):
            with FakeProcessCalibrate() as fake_process:
                yield fake_process
        ```
        """
        super().__init__()

        # Fake calls to slurm: both node queries report a single node, and
        # sbatch replies with a fixed (fake) job id.
        self.register_subprocess(settings.free_nodes_cmd, stdout=["1"])
        self.register_subprocess(settings.preempt_nodes_cmd, stdout=["1"])
        self.register_subprocess(
            ["sbatch", self.any()], stdout=["Submitted batch job 000000"]
        )

        # For the version insertion...
        # `self.any()` matches the `--git-dir=...` argument, which varies.
        self.register_subprocess(
            ["git", self.any(), "describe", "--tag"], stdout=["0.0.0"]
        )

        # NOTE(review): presumably keeps the registered definitions available
        # for repeated calls to the same commands — confirm against the
        # pytest-subprocess documentation for keep_last_process.
        self.keep_last_process(True)
class MockProposal:
    """Class used with pytest to create proposal directories with placeholder
    files

    Use this to generate a skeleton of the proposal structure for tests which
    only rely on the files being present and not their contents, e.g. balancing
    by sequences done in some notebooks
    """

    def __init__(
        self,
        *,
        tmp_path: Path,
        runs: Union[int, list, slice],
        instrument: str,
        cycle: str,
        proposal: str,
        sequences: int = 2,
    ):
        """Create a MockProposal object, this should be used to create a fixture

        Expected use is something like:

        ```python3
        @pytest.fixture
        def mock_proposal(tmp_path):
            return MockProposal(tmp_path, runs=4)
        ```

        Then `mock_proposal` can be passed as a fixture (or set to autouse) to
        make the object available for individual or class tests

        Args:
            tmp_path (Path): Temporary path, should come from pytest fixture
            runs (Union[int, list, slice]): Defines what run directories to make
            instrument (str, optional): Instrument name, e.g. "AGIPD".
            cycle (str, optional): Cycle, e.g. "202031".
            proposal (str, optional): Proposal number, e.g. "p900113".
            sequences (int, optional): Number of sequence files. Defaults to 2.
        """
        self.tmp_path = tmp_path
        self.instrument = instrument
        self.cycle = cycle
        self.proposal = proposal
        self.sequences = list(
            range(sequences)
        )  # TODO: Implement once it's in extra-data

        # Standard EuXFEL layout: <instrument>/<cycle>/<proposal>/{raw,proc}
        self.path = tmp_path / instrument / cycle / proposal
        self.path_raw = self.path / "raw"
        self.path_proc = self.path / "proc"

        self.runs = self.create_runs(runs)

    def create_runs(self, runs: Union[int, List[int], slice]) -> Dict[int, Path]:
        """Create run directories with skeleton files for the proposal

        Args:
            runs (Union[int, list, slice]): Defines what run directories to make

        Raises:
            TypeError: If `runs` is not an int, a list of ints, or a slice.
            ValueError: If `runs` is a slice without a stop value.

        Returns:
            [Dict[int, Path]]: Dictionary of the run number and run directory
        """
        if isinstance(runs, int):
            runs = list(range(runs))
        elif isinstance(runs, list):
            if not all(isinstance(r, int) for r in runs):
                raise TypeError("lists of runs must contain only integers")
        elif isinstance(runs, slice):
            # Compare against None explicitly: `slice(0)` is a valid (empty)
            # slice whose stop value of 0 is falsy and would otherwise be
            # rejected by a plain truthiness test.
            if runs.stop is not None:
                runs = list(range(runs.stop)[runs])
            else:
                raise ValueError("runs slice must have a stop value")
        else:
            raise TypeError("runs must be an int, list of integers, or a slice")

        run_directories = {run: self.path_raw / f"r{run:04d}" for run in runs}

        for run_path in run_directories.values():
            run_path.mkdir(parents=True)
            self._create_run(run_path)

        return run_directories

    @property
    def _create_run(self) -> Callable:
        """Return the file-creating function for this proposal's instrument

        Returns:
            Callable: An `extra_data` example-file writer which takes the run
                directory as its argument, or a no-op for the "None" instrument
        """
        return {
            "AGIPD": make_examples.make_spb_run,
            "DSSC": make_examples.make_scs_run,
            "JGFR": make_examples.make_jungfrau_run,
            "LPD": make_examples.make_fxe_run,
            "None": lambda x: None,
        }[self.instrument]
class CalibrateCall:
    """Class used with pytest to create call `xfel-calibrate` and log its output

    Use this to manage calls to `xfel-calibrate` via pytest fixtures
    """

    def __init__(
        self,
        tmp_path,
        capsys,
        *,
        detector: str,
        cal_type: str,
        command: str = "xfel-calibrate",
        in_folder: Optional[Path] = None,
        out_folder: Optional[Path] = None,
        extra_args: List[str] = None,
    ):
        """Create a CalibrateCall object, this should be used to create a fixture

        Expected use is something like:

        ```python3
        @pytest.fixture(scope="function")
        def calibrate_call(self, mock_proposal: MockProposal, capsys, tmp_path):
            return CalibrateCall(
                tmp_path,
                capsys,
                in_folder=mock_proposal.path_raw,
                out_folder=mock_proposal.path_proc,
                command="xfel-calibrate",
                detector="AGIPD",
                cal_type="CORRECT",
                extra_args=["--run", "0"],
            )
        ```

        Args:
            tmp_path ([type]): Temporary path, should come from pytest fixture
            capsys ([type]): capsys path, should come from pytest fixture
            detector (str): Detector passed to the command, e.g. AGIPD
            cal_type (str): Calibration type passed to the command, e.g. CORRECT
            command (str): Main entrypoint called. Defaults to "xfel-calibrate".
            in_folder (Optional[Path], optional): Path to the input folder, usually
                raw. Defaults to None.
            out_folder (Optional[Path], optional): Path to the output folder, usually
                proc. Defaults to None.
            extra_args (List[str], optional): Additional arguments to pass to the
                command. Defaults to None.
        """
        self.tmp_path = tmp_path
        self.in_folder = in_folder
        self.out_folder = out_folder

        # Build the argv that `calibrate.run()` will see.
        self.args = []
        self.args.extend([command, detector, cal_type])
        if in_folder:
            self.args.extend(["--in-folder", str(self.in_folder)])
        if out_folder:
            self.args.extend(["--out-folder", str(self.out_folder)])

        if extra_args:
            self.args.extend(extra_args)

        # Patch argv and point the calibration temp dir at tmp_path so that
        # the generated files end up somewhere the test can inspect.
        with mock.patch.object(sys, "argv", self.args):
            with mock.patch.object(calibrate, "temp_path", str(tmp_path)):
                calibrate.run()

                out, err = capsys.readouterr()
                # Captured stdout of the call
                self.out: str = out
                # Captured stderr of the call
                self.err: str = err

        # Collect the files generated by the call for later assertions.
        Paths = NamedTuple(
            "Paths",
            [
                ("notebooks", List[Path]),
                ("run_calibrate", Path),
                ("finalize", Path),
                ("InputParameters", Path),
            ],
        )

        self.paths = Paths(
            notebooks=list(self.tmp_path.glob("**/*/*.ipynb")),
            run_calibrate=list(self.tmp_path.glob("**/*/run_calibrate.sh"))[0],
            finalize=list(self.tmp_path.glob("**/*/finalize.py"))[0],
            InputParameters=list(self.tmp_path.glob("**/*/InputParameters.rst"))[0],
        )
# TODO: These are unit tests, `test_cli.py` contains integration tests, may be
# worth splitting these up in the future so that it's easier to track
# what's-what, track the coverage of both approaches individually, and run them
# independently from each other
import pytest
from xfel_calibrate.calibrate import balance_sequences
@pytest.mark.parametrize(
    "karabo_da,sequences,expected",
    [
        # GPFS-backed case: balancing an explicit sequence list for all DAs.
        pytest.param(
            "all",
            [0, 2, 5, 10, 20, 50, 100],
            [[0], [2]],
            marks=pytest.mark.requires_gpfs(),
        ),
        # DA with no matching files and "all sequences" yields nothing to balance.
        ("JNGFR01", [-1], []),
    ],
)
def test_balance_sequences(karabo_da, sequences, expected):
    """`balance_sequences` returns the expected per-node sequence groups."""
    ret = balance_sequences(
        in_folder="/gpfs/exfel/exp/CALLAB/202031/p900113/raw",
        run=9992,
        sequences=sequences,
        sequences_per_node=1,
        karabo_da=karabo_da,
        max_nodes=8,
    )

    assert ret == expected
@pytest.mark.parametrize(
    "karabo_da,sequences,expected",
    [
        # Requested sequences do not exist in the run -> ValueError.
        pytest.param("all", [1991, 2021], ValueError, marks=pytest.mark.requires_gpfs()),
        # karabo_da that is neither str nor list -> TypeError.
        (-1, [], TypeError),
    ],
)
def test_balance_sequences_raises(karabo_da, sequences, expected):
    """`balance_sequences` raises the documented exceptions on bad input."""
    with pytest.raises(expected):
        balance_sequences(
            in_folder="/gpfs/exfel/exp/CALLAB/202031/p900113/raw",
            run=9992,
            sequences=sequences,
            sequences_per_node=1,
            karabo_da=karabo_da,
            max_nodes=8,
        )
# pylint: disable=missing-class-docstring, missing-function-docstring, no-self-use
"""Tests for the CLI portion of `xfel_calibrate`
These tests cover the CLI interface which is called by the `xfel-calibrate ...`
entrypoint. Some sections of the `calibrate.py` file are still not covered by
the current test cases, this should be improved later on.
"""
import ast
import shlex
import sys
from datetime import date
from pathlib import Path
from unittest import mock
import nbformat
import pytest
from nbparameterise import extract_parameters
import xfel_calibrate.calibrate as calibrate
from tests.test_xfel_calibrate.conftest import (
CalibrateCall,
FakeProcessCalibrate,
MockProposal,
)
class TestBasicCalls:
    """Tests which only call the command line utility `xfel-calibrate` and check
    that the expected output is present in stdout
    """

    @mock.patch.object(sys, "argv", ["xfel-calibrate", "--help"])
    def test_help(self, capsys):
        # argparse help always exits, hence the SystemExit guard.
        with pytest.raises(SystemExit):
            calibrate.run()

        out, err = capsys.readouterr()

        # Should always be present in these help outputs
        assert "positional arguments:" in out
        assert "optional arguments:" in out

        assert err == ""

    @mock.patch.object(sys, "argv", ["xfel-calibrate", "TEST", "-h"])
    def test_help_detector(self, capsys):
        with pytest.raises(SystemExit):
            calibrate.run()

        out, err = capsys.readouterr()

        # The TEST notebook's description should be propagated to the help text.
        assert "Notebook for use with the unit and continuous integration" in out
        assert "tests." in out

        assert err == ""

    @mock.patch.object(sys, "argv", ["xfel-calibrate", "NotADetector", "beep", "-h"])
    def test_unknown_detector(self, capsys):
        with pytest.raises(SystemExit) as exit_exception:
            calibrate.run()

        out, err = capsys.readouterr()

        # Unknown detector exits with code 1 and an explanatory message.
        assert exit_exception.value.code == 1
        assert "Not one of the known calibrations or detectors" in out

        assert err == ""

    @mock.patch.object(sys, "argv", ["xfel-calibrate", "NotADetector", "-h"])
    def test_unknown_detector_h(self, capsys):
        with pytest.raises(SystemExit) as exit_exception:
            calibrate.run()

        out, err = capsys.readouterr()

        assert exit_exception.value.code == 1
        assert "Not one of the known detectors" in out

        assert err == ""

    @mock.patch.object(sys, "argv", ["xfel-calibrate", "Tutorial", "TEST", "--help"])
    def test_help_nb(self, capsys):
        with pytest.raises(SystemExit):
            calibrate.run()

        out, err = capsys.readouterr()

        # Should always be present in these help outputs
        assert "positional arguments:" in out
        assert "optional arguments:" in out

        # Defined in the test notebook, should be propagated to help output
        assert "sensor-size" in out
        assert "random-seed" in out

        assert err == ""
class TestTutorialNotebook:
    """Checks calling `xfel-calibrate` on the `Tutorial TEST` notebook, looks
    at the stdout as well as files generated by the call
    """

    @pytest.fixture(scope="class", autouse=True)
    def fake_process_calibrate(self):
        # Fake all subprocess calls (slurm, git) for the whole class.
        with FakeProcessCalibrate() as fake_process:
            yield fake_process

    @pytest.fixture(scope="class")
    def mock_proposal(self, tmp_path_factory):
        # Minimal proposal skeleton: no instrument data, no runs, no sequences.
        return MockProposal(
            tmp_path=tmp_path_factory.mktemp("exp"),
            instrument="None",
            cycle="000000",
            proposal="p000000",
            runs=0,
            sequences=0,
        )

    @pytest.fixture(scope="function")
    def calibrate_call(
        self,
        mock_proposal: MockProposal,
        capsys,
        tmp_path,
    ):
        return CalibrateCall(
            tmp_path,
            capsys,
            out_folder=mock_proposal.path_proc,
            detector="Tutorial",
            cal_type="TEST",
            extra_args=["--runs", "1000"],
        )

    def test_call(
        self,
        calibrate_call: CalibrateCall,
    ):
        # Stdout should show the (faked) sbatch submission.
        assert "sbatch" in calibrate_call.out
        assert "--job-name xfel_calibrate" in calibrate_call.out
        assert str(calibrate_call.tmp_path) in calibrate_call.out
        assert "Submitted job: " in calibrate_call.out

        assert calibrate_call.err == ""

    def test_expected_processes_called(
        self,
        fake_process_calibrate: FakeProcessCalibrate,
    ):
        # Normalise recorded calls: string commands are tokenized so the
        # process name is always the first element.
        process_calls = [
            list(shlex.shlex(p, posix=True, punctuation_chars=True))
            if isinstance(p, str)
            else p
            for p in fake_process_calibrate.calls
        ]

        processes_called = [p[0] for p in process_calls]  # List of the process names

        assert "sbatch" in processes_called

    @pytest.mark.skip(reason="not implemented")
    def test_output_metadata_yml(self):
        # TODO: Finish this test later, not a priority
        # metadata_yml_path = list(self.tmp_path.glob("**/calibration_metadata.yml"))
        pass

    def test_output_ipynb(self, calibrate_call: CalibrateCall):
        notebook_path = calibrate_call.paths.notebooks

        assert len(notebook_path) == 1

        with notebook_path[0].open() as file:
            notebook = nbformat.read(file, as_version=4)

        # Parameters written into the notebook should reflect the CLI args.
        parameters = {p.name: p.value for p in extract_parameters(notebook)}

        assert parameters["out_folder"] == str(calibrate_call.out_folder)
        assert parameters["sensor_size"] == [10, 30]
        assert parameters["random_seed"] == [2345]
        assert parameters["runs"] == 1000

    def test_output_finalize(
        self, mock_proposal: MockProposal, calibrate_call: CalibrateCall
    ):
        # TODO: Specify `feature_version` once we run on python 3.8+
        finalize_ast = ast.parse(calibrate_call.paths.finalize.read_text())

        today = date.today()

        # Keyword arguments expected to match exactly...
        expected_equals = {
            "joblist": [],
            "project": "Tutorial Calculation",
            "calibration": "Tutorial Calculation",
            "author": "Astrid Muennich",
            "version": "0.0.0",
            "data_path": "",
        }

        # ...and those where the expected value is only a substring.
        expected_contains = {
            "request_time": str(today),
            "submission_time": str(today),
            "run_path": str(calibrate_call.tmp_path),
            # TODO: add a test checking that the out folder is correct
            # refer to: https://git.xfel.eu/gitlab/detectors/pycalibration/issues/52
            "out_path": str(mock_proposal.path_proc),
            "report_to": str(mock_proposal.path_proc),
        }

        # Pull the keyword arguments out of the finalize function call via the AST,
        # here we use the keys in `expected_...` to filter which kwargs are parsed
        # as some cannot be read
        finalize_kwargs = {
            k.arg: ast.literal_eval(k.value)
            for k in ast.walk(finalize_ast)
            if isinstance(k, ast.keyword)
            and (k.arg in expected_equals or k.arg in expected_contains)
        }

        for k, v in expected_equals.items():
            assert v == finalize_kwargs[k]

        for k, v in expected_contains.items():
            assert v in finalize_kwargs[k]

    @pytest.mark.skip(reason="not implemented")
    def test_output_rst(self, calibrate_call: CalibrateCall):
        # TODO: Finish this test later, not a priority
        # rst_path = calibrate_call.paths.InputParameters
        pass

    def test_output_sh(self, calibrate_call: CalibrateCall):
        # Tokenize the generated shell script to verify the command line.
        cmd = list(
            shlex.shlex(
                calibrate_call.paths.run_calibrate.read_text(),
                posix=True,
                punctuation_chars=True,
            )
        )

        assert (
            cmd[0] == "xfel-calibrate"
        ), f"{calibrate_call.paths.run_calibrate} does not call `xfel-calibrate`"

        assert cmd[1:3] == ["Tutorial", "TEST"]
        assert {"--out-folder", str(calibrate_call.out_folder)}.issubset(cmd)
        assert {"--runs", "1000"}.issubset(cmd)
class TestIntelliList:
    """Checks `xfel-calibrate` handling of intelli-list (`1,2,5-8`-style)
    parameters using the TEST/TEST-CLI notebook.
    """

    @pytest.fixture(scope="class", autouse=True)
    def fake_process_calibrate(self):
        with FakeProcessCalibrate() as fake_process:
            yield fake_process

    @pytest.fixture(scope="class")
    def mock_proposal(self, tmpdir_factory):
        return MockProposal(
            tmp_path=Path(tmpdir_factory.mktemp("exp")),
            instrument="AGIPD",
            cycle="202031",
            proposal="p900113",
            runs=1,
            sequences=1,
        )

    @pytest.fixture(scope="function")
    def calibrate_call(self, mock_proposal: MockProposal, capsys, tmp_path):
        return CalibrateCall(
            tmp_path,
            capsys,
            in_folder=mock_proposal.path_raw,
            out_folder=mock_proposal.path_proc,
            detector="TEST",
            cal_type="TEST-CLI",
            extra_args=[
                "--number",
                "10",
                "--list-normal",
                "1,2,10",
                "--list-intellilist",
                "1,2,5-8",
                "--concurrency-parameter",
                "0,1",
            ],
        )

    def test_intellilist(self, calibrate_call: CalibrateCall):
        assert "--number" in calibrate_call.args
        assert "--list-intellilist" in calibrate_call.args
        assert "1,2,5-8" in calibrate_call.args

        # Two concurrency values -> one generated notebook per value.
        assert len(calibrate_call.paths.notebooks) == 2

        for i, notebook_path in enumerate(calibrate_call.paths.notebooks):
            with notebook_path.open() as file:
                notebook = nbformat.read(file, as_version=4)

            parameters = {p.name: p.value for p in extract_parameters(notebook)}

            assert parameters["number"] == 10
            assert parameters["list_normal"] == [1, 2, 10]
            # NOTE(review): `5-8` expands to [5, 6, 7] here (exclusive upper
            # bound) — confirm against IntelliListAction's range parsing.
            assert parameters["list_intellilist"] == [1, 2, 5, 6, 7]
            assert parameters["concurrency_parameter"][0] == i
class TestAgipdNotebook:
    """Checks `xfel-calibrate` calls for the AGIPD CORRECT notebooks; most
    tests are currently skipped pending notebook-execution test support.
    """

    @pytest.fixture(scope="class", autouse=True)
    def fake_process_calibrate(self):
        with FakeProcessCalibrate() as fake_process:
            yield fake_process

    @pytest.fixture(scope="function")
    def mock_proposal(self, tmpdir_factory):
        return MockProposal(
            tmp_path=Path(tmpdir_factory.mktemp("exp")),
            instrument="AGIPD",
            cycle="202031",
            proposal="p900113",
            runs=2,
            # TODO: update this once extra-data tests can have variable sequences
            # sequences=5,
        )

    @pytest.fixture(scope="function")
    def calibrate_call(self, mock_proposal: MockProposal, capsys, tmp_path):
        return CalibrateCall(
            tmp_path,
            capsys,
            in_folder=mock_proposal.path_raw,
            out_folder=mock_proposal.path_proc / "r0000",
            detector="AGIPD",
            cal_type="CORRECT",
            extra_args=[
                "--run",
                "0",
                "--sequences",
                "1-3",
                # TODO: enable this when notebook execution tests are ready to be ran
                # "--no-cluster-job",
            ],
        )

    @pytest.mark.skip(reason="not implemented")
    def test_out_folder_correct(self):
        # TODO: add a test checking that the out folder is correct
        # refer to: https://git.xfel.eu/gitlab/detectors/pycalibration/issues/52
        pass

    @pytest.mark.skip(reason="requires extra-data test file sequence options")
    def test_files_present(self, calibrate_call: CalibrateCall):
        # There should be three notebooks: one pre, then the main, then one post
        assert len(calibrate_call.paths.notebooks) == 3

        # This is pretty fragile, but the name of notebooks should not change
        # (too) often
        root_nb_path = calibrate_call.paths.notebooks[0].parent
        notebooks = [
            root_nb_path / "AGIPD_Correct_and_Verify__sequences__1.ipynb",
            root_nb_path / "AGIPD_Correct_and_Verify_Summary_NBC__None__None.ipynb",
            root_nb_path / "AGIPD_Retrieve_Constants_Precorrection__None__None.ipynb",
        ]

        assert all(nb in calibrate_call.paths.notebooks for nb in notebooks)

    @pytest.mark.skip(reason="not implemented")
    def test_nb_sequences(self, calibrate_call: CalibrateCall):
        notebook_path = (
            calibrate_call.paths.notebooks[0].parent
            / "AGIPD_Correct_and_Verify__sequences__1.ipynb"
        )
        with notebook_path.open() as file:
            notebook = nbformat.read(file, as_version=4)

        parameters = {p.name: p.value for p in extract_parameters(notebook)}
        # TODO: add test cases for this notebook
        print(parameters)

    @pytest.mark.skip(reason="not implemented")
    def test_nb_summary(self, calibrate_call: CalibrateCall):
        notebook_path = (
            calibrate_call.paths.notebooks[0].parent
            / "AGIPD_Correct_and_Verify_Summary_NBC__None__None.ipynb"
        )
        with notebook_path.open() as file:
            notebook = nbformat.read(file, as_version=4)

        parameters = {p.name: p.value for p in extract_parameters(notebook)}
        # TODO: add test cases for this notebook
        print(parameters)

    @pytest.mark.skip(reason="not implemented")
    def test_nb_precorrection(self, calibrate_call: CalibrateCall):
        notebook_path = (
            calibrate_call.paths.notebooks[0].parent
            / "AGIPD_Retrieve_Constants_Precorrection__None__None.ipynb"
        )
        with notebook_path.open() as file:
            notebook = nbformat.read(file, as_version=4)

        # TODO: add test cases for this notebook
        parameters = {p.name: p.value for p in extract_parameters(notebook)}
        print(parameters)
......@@ -1087,11 +1087,18 @@ class ActionsServer:
request_time
)
await update_mdc_status(self.mdc, 'dark_request', rid, ret)
if report_path is None:
if len(report_path) == 0:
logging.warning("Failed to identify report path for dark_request")
else:
await update_darks_paths(self.mdc, rid, in_folder,
out_folder, report_path)
if len(report_path) > 1:
logging.warning(
"More than one report path is returned. "
"Updating myMDC with the first report path only."
)
await update_darks_paths(
self.mdc, rid, in_folder,
out_folder, report_path[0]
)
# END of part to run after sending reply
asyncio.ensure_future(_continue())
......@@ -1162,7 +1169,9 @@ class ActionsServer:
async def launch_jobs(
self, run_nrs, rid, detectors, action, instrument, cycle, proposal,
request_time
) -> (str, Optional[str]):
) -> (str, List[str]):
report = []
ret = []
# run xfel_calibrate
for karabo_id, dconfig in detectors.items():
detector = dconfig['detector-type']
......@@ -1179,16 +1188,16 @@ class ActionsServer:
).split()
cmd = parse_config(cmd, dconfig)
ret = await run_action(self.job_db, cmd, self.mode,
proposal, run_nrs[-1], rid)
# TODO: Add detector info in returned run action status.
ret.append(await run_action(
self.job_db, cmd, self.mode,
proposal, run_nrs[-1], rid
))
if '--report-to' in cmd[:-1]:
report_idx = cmd.index('--report-to') + 1
report = cmd[report_idx] + '.pdf'
else:
report = None
return ret, report
report.append(cmd[report_idx] + '.pdf')
# return string without a trailing comma.
return ", ".join(ret), report
parser = argparse.ArgumentParser(
description='Start the calibration webservice')
......