Commit a5fcf4dd authored by Karim Ahmed

Merge branch 'feat/test_logs' into 'master'

move some logs to DEBUG and extend the report sleep

See merge request !896
parents 34c5c4db 5fb8ad7a
@@ -152,23 +152,23 @@ def find_differences(
     assert set(datasets_f1) == set(datasets_f2), f"{test_file} and {reference_file} consists of different datasets."  # noqa
     assert set(objects_f1) == set(objects_f2), f"{test_file} and {reference_file} consists of different datasets."  # noqa
     duration = time.perf_counter() - start_time
-    LOGGER.info("Elapsed time comparing groups, "
+    LOGGER.debug("Elapsed time comparing groups, "
                 f"datasets, and objects: {duration} seconds")
-    LOGGER.info("Groups, datasets, and objects have the same content.")
+    LOGGER.debug("Groups, datasets, and objects have the same content.")
     # Compare datasets and objects.
     start_time = time.perf_counter()
     h5_diff_datasets = compare_datasets(file1, file2, datasets_f1)
     duration = time.perf_counter() - start_time
-    LOGGER.info(f"Elapsed time comparing datasets: {duration} seconds")
+    LOGGER.debug(f"Elapsed time comparing datasets: {duration} seconds")
     start_time = time.perf_counter()
     h5_diff_objects = compare_objects(file1, file2, objects_f1)
-    LOGGER.info(f"Elapsed time comparing objects: {duration} seconds")
+    LOGGER.debug(f"Elapsed time comparing objects: {duration} seconds")
     assert not h5_diff_datasets, f"{[d for d in h5_diff_datasets]} datasets contain different values for {test_file} and {reference_file}"  # noqa
-    LOGGER.info("Datasets are validated.")
+    LOGGER.debug("Datasets are validated.")
     assert not h5_diff_objects, f"{[d for d in h5_diff_objects]} objects contain different values for {test_file} and {reference_file}"  # noqa
-    LOGGER.info("Objects are validated.")
+    LOGGER.debug("Objects are validated.")

 def validate_files(
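The hunk above repeats the same measure-then-log pattern around every comparison step. A minimal sketch of how that pattern could be factored into a context manager (illustrative only, not part of this commit; the name log_duration is hypothetical):

    import logging
    import time
    from contextlib import contextmanager

    LOGGER = logging.getLogger(__name__)

    @contextmanager
    def log_duration(label, level=logging.DEBUG):
        # Measure wall-clock time of the wrapped block and log it at the
        # given level (DEBUG by default, matching this commit's change).
        start = time.perf_counter()
        try:
            yield
        finally:
            duration = time.perf_counter() - start
            LOGGER.log(level, "Elapsed time %s: %s seconds", label, duration)

With such a helper, each start_time/duration pair would collapse to, e.g., `with log_duration("comparing datasets"): h5_diff_datasets = compare_datasets(file1, file2, datasets_f1)`.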
@@ -235,19 +235,19 @@ def validate_files(
         exclude_sources(test_file, hp1, exclude_attrs)
         duration = time.perf_counter() - start_time
-        LOGGER.info(f"Elapsed time copying {test_file}: "
+        LOGGER.debug(f"Elapsed time copying {test_file}: "
                     f"{duration} seconds")
         start_time = time.perf_counter()
         exclude_sources(ref_folder / test_file.name, hp2, exclude_attrs)
         duration = time.perf_counter() - start_time
-        LOGGER.info(f"Elapsed time copying {ref_folder / test_file.name}: "
+        LOGGER.debug(f"Elapsed time copying {ref_folder / test_file.name}: "
                     f"{duration} seconds")
         start_time = time.perf_counter()
         result = file_md5(out_tf.name) == file_md5(ref_tf.name)
-        LOGGER.info(f"MD5 validation for {test_file}: {duration} seconds")
+        LOGGER.debug(f"MD5 validation for {test_file}: {duration} seconds")
         duration = time.perf_counter() - start_validating
     return result, test_file
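file_md5 is defined elsewhere in the test suite; this hunk only shows it being called on both the output and reference files. For readers unfamiliar with the idiom, a plausible chunked implementation looks like the following (a sketch under the assumption that it returns a hex digest; not the repository's actual code):

    import hashlib

    def file_md5(path, chunk_size=1024 * 1024):
        # Hash the file in fixed-size chunks so large HDF5 files
        # are never read into memory all at once.
        md5 = hashlib.md5()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                md5.update(chunk)
        return md5.hexdigest()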
@@ -394,7 +394,6 @@ def slurm_watcher(
     states = res.stdout.decode().split("\n")[2:-1]
     assert all(s.strip() == "COMPLETED" for s in states), f"{test_key} failure, calibration jobs were not completed. {jobids}: {states}"  # noqa
     LOGGER.info(f"{test_key}'s jobs were COMPLETED")
-    time.sleep(1.0)

 @pytest.mark.manual_run
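For context, `states` is parsed from sacct-style output, whose first two lines are headers. A hedged sketch of how such a query could be issued (the exact command and flags used by slurm_watcher are not shown in this diff; job_states is a hypothetical helper):

    import subprocess

    def job_states(jobids):
        # Ask Slurm's accounting database for the state of each job.
        # Dropping the two header lines and the trailing empty line
        # mirrors the `[2:-1]` slice in the hunk above.
        res = subprocess.run(
            ["sacct", "-j", ",".join(jobids), "--format=State"],
            capture_output=True, check=True,
        )
        return [s.strip() for s in res.stdout.decode().split("\n")[2:-1]]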
@@ -483,9 +482,15 @@ def test_xfel_calibrate(
     # confirm that all jobs succeeded.
     assert errors == 0
+    time_to_wait = 5
+    time_counter = 0
     # 2nd check for report availability.
     report_file = out_folder / f"{report_name}.pdf"
-    assert report_file.exists(), f"{test_key} failure, report doesn't exists."
+    while not report_file.exists():
+        time.sleep(1)
+        time_counter += 1
+        if time_counter > time_to_wait:
+            assert False, f"{test_key} failure, report doesn't exists."
     LOGGER.info("Report found.")
     # Stop tests at this point, if desired.
...
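The new loop polls once per second and gives up only after time_counter exceeds time_to_wait, i.e. after about six seconds in total. The same behaviour can be expressed with an explicit deadline, which avoids the counter bookkeeping (an illustrative refactor, not the commit's code; wait_for_file is a hypothetical helper):

    import time
    from pathlib import Path

    def wait_for_file(path: Path, timeout: float = 6.0, interval: float = 1.0) -> bool:
        # Poll until the file appears or the deadline passes.
        deadline = time.monotonic() + timeout
        while not path.exists():
            if time.monotonic() >= deadline:
                return False
            time.sleep(interval)
        return True

The test would then reduce to a single `assert wait_for_file(report_file)` with the original failure message.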