Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • calibration/pycalibration
1 result
Show changes
Showing
with 967 additions and 654 deletions
......@@ -293,7 +293,7 @@ async def server_runner(conf_file, mode):
try:
output = await asyncio.create_subprocess_shell(
" ".join(run_base), stdout=PIPE, stderr=PIPE)
launched_jobs.append(output.communicate())
logging.info('Submission information: {}:'
......@@ -402,7 +402,7 @@ async def server_runner(conf_file, mode):
break
arg_parser = argparse.ArgumentParser(description='Start the report service')
arg_parser.add_argument('--config-file', type=str,
arg_parser.add_argument('--config-file', type=str,
default='./report_conf.yaml',
help='config file path with '
'reportservice port. '
......
git+file:///gpfs/exfel/sw/calsoft/git/cal_db_interactive@2.0.0
git+file:///gpfs/exfel/sw/calsoft/git/cal_db_interactive@2.0.1
git+file:///gpfs/exfel/sw/calsoft/git/nbparameterise@0.3
git+file:///gpfs/exfel/sw/calsoft/git/pyDetLib@2.5.6-2.10.0#subdirectory=lib
Cython == 0.29.21
Jinja2 == 2.11.2
astcheck == 0.2.5
astsearch == 0.1.3
Cython == 0.29.21
dill == 0.3.0
extra_data == 1.2.0
extra_geom == 1.1.1
......@@ -11,22 +12,22 @@ fabio == 0.9.0
gitpython == 3.1.0
h5py == 2.10.0
iminuit == 1.3.8
ipyparallel == 6.2.4
ipykernel == 5.1.4
ipyparallel == 6.2.4
ipython == 7.12.0
ipython_genutils == 0.2.0
Jinja2 == 2.11.2
jupyter-core == 4.6.1
jupyter_client == 6.1.7
jupyter_console == 6.1.0
jupyter-core == 4.6.1
karabo_data == 0.7.0
lxml == 4.5.0
metadata_client == 3.0.8
nbclient == 0.5.1
nbconvert == 5.6.1
nbformat == 5.0.7
notebook == 6.1.5
notebook == 6.1.5
numpy == 1.19.1
pre-commit == 2.10.0
prettytable == 0.7.2
princess == 0.2
pypandoc == 1.4
......
......@@ -68,7 +68,7 @@ setup(
'xfel_calibrate': ['bin/*.sh'] + data_files + ['titlepage.tmpl',
'xfel.pdf']
},
cmdclass={
'build' : PreInstallCommand,
'install': PostInstallCommand,
......@@ -81,10 +81,9 @@ setup(
description='',
entry_points = {
'console_scripts': [
'xfel-calibrate = xfel_calibrate.calibrate:run',
],
'xfel-calibrate = xfel_calibrate.calibrate:run',
],
},
ext_modules=extensions
)
)
......@@ -283,7 +283,7 @@ class CorrectionTestBase:
print("Executing {}".format(" ".join(cmd)))
print("Creating data paths for artefacts")
cls.artefact_dir = get_artefact_dir(cls)
if not os.path.exists(cls.artefact_dir):
......@@ -333,10 +333,10 @@ class CorrectionTestBase:
"""
out_folder = self._output_to_path()
files_to_check = glob.glob(
"{}/*{}".format(out_folder, self.rel_file_ext))
"{}/*{}".format(out_folder, self.rel_file_ext))
for fname in files_to_check:
with h5py.File(fname, "r") as f:
d = {}
def visitor(k, item):
......@@ -344,20 +344,20 @@ class CorrectionTestBase:
d[k] = item.fletcher32
f.visititems(visitor)
chkfname = "{}.checksum".format(fname)
chkpath = "{}/{}".format(self.artefact_dir,
os.path.basename(chkfname))
with open(chkpath, 'wb') as fc:
pickle.dump(d, fc, pickle.HIGHEST_PROTOCOL)
pickle.dump(d, fc, pickle.HIGHEST_PROTOCOL)
@unittest.skipIf(args.skip_checksum,
"User requested to skip checksum test")
def test_checksums(self):
""" Compare Fletcher32 checksums of notebook's output with artefacts
This test will verify if datasets with checksums are identical.
Even for small changes in the correction logic this test is likely
This test will verify if datasets with checksums are identical.
Even for small changes in the correction logic this test is likely
to fail.
If this is the case, it is recommended to verify correctness using
the other tests, which inspect data, and the create new checksums
......@@ -379,12 +379,12 @@ class CorrectionTestBase:
"No comparison checksums found")
with open(chkpath, 'rb') as fc:
d = pickle.load(fc)
with h5py.File(fname, "r") as f:
def visitor(k, item):
if isinstance(item, h5py.Dataset):
msg = "Verify checksum of: {}".format(k)
with self.subTest(msg=msg):
self.assertIn(k, d)
......@@ -578,7 +578,7 @@ class CorrectionTestBase:
_, last_train = rd.train_from_id(rd.train_ids[-1])
test_train_info(last_train, "last_train")
@unittest.skipIf(args.skip_karabo_data,
"User requested to skip karabo data test")
def test_karabo_data_self_test(self):
......@@ -592,7 +592,7 @@ class CorrectionTestBase:
"User requested to skip report generation test")
def test_report_gen(self):
""" Verify expected reports are generated
Also verifies that no additional reports are present, and copies
the report to the artefact dir.
"""
......@@ -610,4 +610,3 @@ class CorrectionTestBase:
pdfs = glob.glob("{}/*.pdf".format(out_folder))
for pdf in pdfs:
self.assertIn(os.path.basename(pdf), self.expected_reports)
......@@ -4,4 +4,3 @@ They are broken and haven't been looked at. Some may be fixed, some are deprecat
This directory is excluded from the CI runner.
It does not mean that they can be freely deleted. Each test file should be assessed and fixed, if possible!
import pytest
from xfel_calibrate.calibrate import balance_sequences
def test_balance_sequences():
    """Exercise balance_sequences() over its main input scenarios.

    Covers: an explicit sequence selection (only sequences actually
    present in the run survive), the "all sequences" (-1) shortcut with
    a named karabo_da, and the ValueError paths for out-of-range
    sequences and a karabo_da of the wrong type.
    """
    in_folder = "/gpfs/exfel/exp/CALLAB/202031/p900113/raw"

    # Explicit sequence list: only sequences 0 and 2 survive for this
    # run, balanced at one sequence per node.
    ret = balance_sequences(in_folder=in_folder,
                            run=9992, sequences=[0, 2, 5, 10, 20, 50, 100],
                            sequences_per_node=1, karabo_da=["all"],
                            max_nodes=8)
    assert ret == [[0], [2]]

    # sequences=[-1] selects all sequences; for this aggregator the
    # expected result is empty.
    ret = balance_sequences(in_folder=in_folder,
                            run=9992, sequences=[-1],
                            sequences_per_node=1, karabo_da=["JNGFR01"],
                            max_nodes=3)
    assert ret == []

    # Requesting sequences that do not exist in the run raises.
    with pytest.raises(ValueError) as e:
        balance_sequences(in_folder=in_folder,
                          run=9992, sequences=[1991, 2021],
                          sequences_per_node=1, karabo_da=["all"],
                          max_nodes=3)
    # FIX: e.value is the exception instance, not a callable — calling
    # it (e.value()) raises TypeError. Match against its str() instead.
    assert 'Selected sequences [1991, 2021]]' in str(e.value)

    # karabo_da must be a string or a list of strings.
    with pytest.raises(ValueError) as e:
        balance_sequences(in_folder=in_folder,
                          run=9992, sequences=[1991, 2021],
                          sequences_per_node=1, karabo_da=-1,
                          max_nodes=3)
    assert 'karabo_da as a string or list' in str(e.value)
import sys
from pathlib import Path
from unittest import mock
import pytest
from testpath import MockCommand
sys.path.insert(0, Path(__file__).parent / 'webservice')
from webservice.webservice import check_files # noqa
from webservice.webservice import (check_files, merge, parse_config,
wait_on_transfer)
def test_check_files():
......@@ -23,3 +26,54 @@ def test_check_files():
with pytest.raises(PermissionError):
in_folder = '/gpfs/maxwell/home/achilles' # arbitrarily chosen
check_files(in_folder, runs, karabo_das)
def test_merge():
    """Deep-merging two nested dicts.

    Keys from both sides end up in the result; on a conflicting leaf
    ('number') the first argument wins.
    """
    primary = {'some': {'key': {'akey': 'avalue', 'number': 1}}}
    secondary = {'some': {'key': {'anotherkey': 'anothervalue', 'number': 5}},
                 'completely': 'different'}

    merged = merge(primary, secondary)

    assert merged == {
        'some': {'key': {'akey': 'avalue',
                         'anotherkey': 'anothervalue',
                         'number': 1}},
        'completely': 'different',
    }
def test_parse_config():
    """parse_config() renders a config dict as CLI flags on a command.

    True booleans become bare flags, False booleans are omitted, lists
    are expanded element-by-element, and quoted-empty values become
    empty-string arguments. Keys or values containing spaces are
    rejected with ValueError.
    """
    base_cmd = ['whatever']
    options = {'somebool': True,
               'notsomebool': False,
               'alist': [1, 2, 3],
               'some_empty_key': '""',
               'other_empty_key': "''",
               'brian': 'scone'}

    rendered = parse_config(base_cmd, options)

    assert rendered == ['whatever', '--somebool', '--alist', '1', '2', '3',
                        '--some_empty_key', '', '--other_empty_key', '',
                        '--brian', 'scone']
    # False booleans must not show up at all.
    assert '--notsomebool' not in rendered

    # A key containing a space is invalid.
    with pytest.raises(ValueError):
        parse_config(base_cmd, {'some key': 'value'})

    # A value containing a space is invalid too.
    with pytest.raises(ValueError):
        parse_config(base_cmd, {'somekey': 'a value'})
@pytest.mark.asyncio
async def test_wait_on_transfer(tmp_path):
    """wait_on_transfer() returns True once getfattr reports dCache status.

    A mocked ``getfattr`` command prints the dCache status attribute,
    so the wait succeeds on the first try.
    """
    fake_script = """#!{}\nprint('user.status="dCache"')""".format(sys.executable)
    fake_getfattr = MockCommand('getfattr', content=fake_script)

    with fake_getfattr:
        result = await wait_on_transfer(str(tmp_path), max_tries=1)

    assert result is True
......@@ -2,7 +2,7 @@ Offline Calibration Webservice
==============================
The offline calibration webservice interacts with the Metadata Catalogue (MDC),
such that migration of data to the offline cluster automatically triggers
such that migration of data to the offline cluster automatically triggers
calibration jobs on relevant files.
Installation
......@@ -18,7 +18,7 @@ The service needs to be installed under a functional user account which
* has write permission to the *proc* folders for outputting corrected data
* is allowed to launch SLURM jobs on the cluster
The hosting system needs to be accessible via ZMQ calls from the MDC.
The hosting system needs to be accessible via ZMQ calls from the MDC.
This requires appropriate DMZ settings. Additionally, it needs to be able
to interact with the MDC via the MDC client interface
......@@ -32,10 +32,10 @@ Additionally, the *xfel-calibrate* environment needs to be installed:
``` bash
git clone https://git.xfel.eu/gitlab/detectors/pycalibration.git .
```
2. pick the python environment to install into. On Maxwell the anaconda/3
environment will work:
``` bash
module load anaconda/3
```
......@@ -48,7 +48,7 @@ Additionally, the *xfel-calibrate* environment needs to be installed:
4. some correction notebooks require pyDetLib. It requires manual installation in
a non-Karabo python environment
``` bash
mkdir pydetlib
cd pydetlib
......@@ -57,19 +57,19 @@ Additionally, the *xfel-calibrate* environment needs to be installed:
pip install --user pycuda
pip install --user ./lib/
cd ..
5. install the separate requirements for the webservice:
``` bash
cd webservice
pip install --user -r requirements.txt
```
6. install the metadata_client library, according to instructions at
https://git.xfel.eu/gitlab/ITDM/metadata_client
You are now good to go.
Configuration
......@@ -84,7 +84,7 @@ In the **config-repo** section, the configuration repository needs to be configu
config-repo:
url: https://git.xfel.eu/gitlab/detectors/calibration_configurations.git
local-path: /home/haufs/calibration_config/
```
```
Here you should prepend the *url* entry with a gitlab access token, that provides access
to the calibration_configurations repository.
......@@ -108,9 +108,9 @@ In the **metadata-client** section, the client interface to the MDC is configure
``` YAML
metadata-client:
user-id:
user-secret:
user-email:
user-id:
user-secret:
user-email:
metadata-web-app-url: 'https://in.xfel.eu/metadata'
metadata-web-app-url: 'https://in.xfel.eu/metadata'
token-url: 'https://in.xfel.eu/metadata/oauth/token'
......@@ -153,5 +153,5 @@ Use
``` bash
python webservice.py --help
```
to display a list of available options.
\ No newline at end of file
to display a list of available options.
......@@ -24,5 +24,3 @@ stuff = [action, dark_run_id, sase, instrument, cycle, proposal, 'SPB_DET_AGIPD1
socket.send(str(stuff).encode())
resp = socket.recv_multipart()[0]
print(resp.decode())
......@@ -13,6 +13,7 @@ class Errors:
MDC_RESPONSE = "FAILED: Response error from MDC: {}"
NOT_CONFIGURED = "FAILED: instrument not configured, please contact det-support@xfel.eu"
NOT_SUBMITTED = "FAILED: correction of {} failed during submision, please contact det-support@xfel.eu"
OTHER_ERROR = "FAILED: Error {}, please contact det-support@xfel.eu"
class MDC:
......
......@@ -19,9 +19,9 @@ shell-commands:
cat-log: "cat web.log"
run-candidates:
- "--run-high"
- "--run-high"
- "--run-med"
- "--run-low"
- "--run-low"
- "--run"
server-config:
......
......@@ -24,4 +24,3 @@ for r in c.fetchall():
rid, jobid, db_proposal, db_run, status, time, _, _ = r
if db_proposal == proposal and db_run == run:
print(r)
......@@ -8,4 +8,4 @@
{% for run_name in runs %}
<label >{{run_name}}:</label>
<input type="number" id="run{{loop.index}}" name="{{run_name}}" min="1" max="999999" size="4">
{% endfor %}
\ No newline at end of file
{% endfor %}
......@@ -3,4 +3,4 @@
<div class="log-out">
{{ logout }}
</div>
</div>
\ No newline at end of file
</div>
......@@ -39,4 +39,3 @@ if response.status_code == 200:
print('Run is updated')
else:
print(f'Update failed {response}')
This diff is collapsed.
......@@ -31,7 +31,7 @@ correct:
--slurm-scheduling {sched_prio}
--slurm-mem 750
--request-time {request_time}
--slurm-name {action}_{instrument}_{detector}_{cycle}_p{proposal}_r{runs}
--slurm-name {action}_{instrument}_{detector}_{cycle}_p{proposal}_{runs}
--report-to /gpfs/exfel/exp/{instrument}/{cycle}/p{proposal}/usr/Reports/{runs}/{det_instance}_{action}_{proposal}_{runs}_{time_stamp}
--cal-db-timeout 300000
--cal-db-interface tcp://max-exfl016:8015#8044
......@@ -45,7 +45,7 @@ dark:
--concurrency-par karabo_da
--slurm-scheduling {sched_prio}
--request-time {request_time}
--slurm-name {action}_{instrument}_{detector}_{cycle}_p{proposal}_r{runs}
--slurm-name {action}_{instrument}_{detector}_{cycle}_p{proposal}_{runs}
--report-to /gpfs/exfel/d/cal/caldb_store/xfel/reports/{instrument}/{det_instance}/{action}/{action}_{proposal}_{runs}_{time_stamp}
--cal-db-interface tcp://max-exfl016:8015#8044
--db-output
This diff is collapsed.
This diff is collapsed.
......@@ -45,7 +45,7 @@ notebooks = {
"cluster cores": 8},
},
"FF_HISTS": {
"notebook":
"notebook":
"notebooks/AGIPD/AGIPD_FF_Histogramming.ipynb",
"concurrency": {"parameter": "modules",
"default concurrency": list(range(16)),
......