diff --git a/tests/test_reference_runs/test_pre_deployment.py b/tests/test_reference_runs/test_pre_deployment.py
index 7a4f36e74191ba90380f76439b8a6893b70a1ff8..c6b2f573ac7b00d081725b7731fdea7db721fb61 100644
--- a/tests/test_reference_runs/test_pre_deployment.py
+++ b/tests/test_reference_runs/test_pre_deployment.py
@@ -152,23 +152,23 @@ def find_differences(
-        assert set(datasets_f1) == set(datasets_f2), f"{test_file} and {reference_file} consists of different datasets."  # noqa
-        assert set(objects_f1) == set(objects_f2), f"{test_file} and {reference_file} consists of different datasets."  # noqa
+        assert set(datasets_f1) == set(datasets_f2), f"{test_file} and {reference_file} consist of different datasets."  # noqa
+        assert set(objects_f1) == set(objects_f2), f"{test_file} and {reference_file} consist of different objects."  # noqa
         duration = time.perf_counter() - start_time
-        LOGGER.info("Elapsed time comparing groups, "
+        LOGGER.debug("Elapsed time comparing groups, "
                     f"datasets, and objects: {duration} seconds")
-        LOGGER.info("Groups, datasets, and objects have the same content.")
+        LOGGER.debug("Groups, datasets, and objects have the same content.")
 
         # Compare datasets and objects.
         start_time = time.perf_counter()
         h5_diff_datasets = compare_datasets(file1, file2, datasets_f1)
         duration = time.perf_counter() - start_time
-        LOGGER.info(f"Elapsed time comparing datasets: {duration} seconds")
+        LOGGER.debug(f"Elapsed time comparing datasets: {duration} seconds")
         start_time = time.perf_counter()
         h5_diff_objects = compare_objects(file1, file2, objects_f1)
-        LOGGER.info(f"Elapsed time comparing objects: {duration} seconds")
+        LOGGER.debug(f"Elapsed time comparing objects: {duration} seconds")
 
         assert not h5_diff_datasets, f"{[d for d in h5_diff_datasets]} datasets contain different values for {test_file} and {reference_file}"  # noqa
-        LOGGER.info("Datasets are validated.")
+        LOGGER.debug("Datasets are validated.")
         assert not h5_diff_objects, f"{[d for d in h5_diff_objects]} objects contain different values for {test_file} and {reference_file}"  # noqa
-        LOGGER.info("Objects are validated.")
+        LOGGER.debug("Objects are validated.")
 
 
 def validate_files(
@@ -235,19 +235,19 @@ def validate_files(
             exclude_sources(test_file, hp1, exclude_attrs)
 
             duration = time.perf_counter() - start_time
-            LOGGER.info(f"Elapsed time copying {test_file}: "
+            LOGGER.debug(f"Elapsed time copying {test_file}: "
                         f"{duration} seconds")
 
             start_time = time.perf_counter()
             exclude_sources(ref_folder / test_file.name, hp2, exclude_attrs)
 
             duration = time.perf_counter() - start_time
-            LOGGER.info(f"Elapsed time copying {ref_folder / test_file.name}: "
+            LOGGER.debug(f"Elapsed time copying {ref_folder / test_file.name}: "
                         f"{duration} seconds")
 
             start_time = time.perf_counter()
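+            # Files are considered identical when their MD5 digests match.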
             result = file_md5(out_tf.name) == file_md5(ref_tf.name)
-            LOGGER.info(f"MD5 validation for {test_file}: {duration} seconds")
+            LOGGER.debug(f"MD5 validation for {test_file}: {duration} seconds")
     duration = time.perf_counter() - start_validating
     return result, test_file
 
@@ -394,7 +394,6 @@ def slurm_watcher(
     states = res.stdout.decode().split("\n")[2:-1]
     assert all(s.strip() == "COMPLETED" for s in states), f"{test_key} failure, calibration jobs were not completed. {jobids}: {states}"  # noqa
     LOGGER.info(f"{test_key}'s jobs were COMPLETED")
-    time.sleep(1.0)
 
 
 @pytest.mark.manual_run
@@ -483,9 +482,15 @@ def test_xfel_calibrate(
         # confirm that all jobs succeeded.
         assert errors == 0
 
+    time_to_wait = 5
+    time_counter = 0
     # 2nd check for report availability.
     report_file = out_folder / f"{report_name}.pdf"
-    assert report_file.exists(), f"{test_key} failure, report doesn't exists."
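+    # The report may appear shortly after the jobs finish, so poll briefly.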
+    while not report_file.exists():
+        time.sleep(1)
+        time_counter += 1
+        if time_counter > time_to_wait:
+            pytest.fail(f"{test_key} failure, report doesn't exist.")
     LOGGER.info("Report found.")
 
     # Stop tests at this point, if desired.