diff --git a/tests/conftest.py b/tests/conftest.py
index d6e39d35738ad1fb774d31e2e9a8ccc286bdc00e..b650a173f376307143a2c0e9ff534463398ab498 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -56,6 +56,34 @@ def pytest_addoption(parser):
         default=False,
         help="Start release test for all supported calibration notebooks",
     )
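+    # Accept detector names in both lower- and upper-case on the command line.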
+    detectors_choices = []
+    for k in ['agipd', 'jungfrau', 'pnccd', 'epix100', 'all']:
+        detectors_choices += [k, k.upper()]
+    parser.addoption(
+        "--detectors",
+        action='append',
+        choices=detectors_choices,
+        help=("Detectors to run the release tests for; can be given "
+              "multiple times. Defaults to all detectors."),
+    )
+
+    parser.addoption(
+        "--calibration",
+        type=str,
+        choices=['correct', 'dark', 'all'],
+        default='all',
+        help="Type of calibration to run the tests for.",
+    )
+
+    parser.addoption(
+        "--no-numerical-validation",
+        action="store_true",
+        default=False,
+        help="Skips tests for numerical validation for produced h5files.",
+    )
+
+    parser.addoption(
+        "--picked-test",
+        type=str,
+        default=None,
+        help="Run only the test with the given test key.",
+    )
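+    # Illustrative usage of the options added above (other flags may be
+    # needed to enable the release-test run):
+    #   pytest --detectors agipd --calibration dark --no-numerical-validation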
 
 
 def pytest_configure(config):
@@ -79,6 +107,21 @@ def pytest_configure(config):
         "manual_run(): marks skips for tests that required to be run manually",
     )
 
+
+@pytest.fixture
+def release_test_config(request):
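+    """Collect the release-test command-line options into a single tuple."""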
+    detectors = request.config.getoption("--detectors")
+    calibration = request.config.getoption("--calibration")
+    skip_numerical_validation = request.config.getoption(
+        "--no-numerical-validation")
+    picked_test = request.config.getoption("--picked-test")
+    return detectors, calibration, picked_test, skip_numerical_validation
+
 
 @lru_cache()
 def server_reachable(server: str = "max-exfl-cal002"):
diff --git a/tests/test_pre_deployment.py b/tests/test_pre_deployment.py
index cf21576b7b09b61fb44653f5a764925099ec76b2..550370ff3125b0681746e38e295915fd47bd9ff3 100644
--- a/tests/test_pre_deployment.py
+++ b/tests/test_pre_deployment.py
@@ -106,12 +106,38 @@ def parse_config(
 
 
 @pytest.mark.manual_run
-@pytest.mark.parametrize("calibration_test", list(callab_test_dict.items()))
-def test_xfel_calibrate(calibration_test):
-
-    test_key, val_dict = calibration_test
+@pytest.mark.parametrize(
+    "test_key, val_dict",
+    list(callab_test_dict.items()),
+    ids=list(callab_test_dict.keys()),
+)
+def test_xfel_calibrate(test_key, val_dict, release_test_config):
+
+    (
+        detectors, calibration,
+        picked_test, skip_numerical_validation
+    ) = release_test_config
+
+
     cal_type = val_dict["cal_type"]
-    cmd = ["xfel-calibrate", val_dict["det_type"], cal_type]
+    det_type = val_dict["det_type"]
+
+    if picked_test is None:
+        # Skip non-selected detectors
+        if "all" not in detectors and det_type.lower() not in detectors:
+            pytest.skip(f"{det_type} is not among the selected detectors.")
+
+        # Skip non-selected calibration
+        if calibration != "all" and cal_type.lower() != calibration:
+            pytest.skip(f"{cal_type} calibration was not selected.")
+    else:
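+        # A specific test was picked via --picked-test; run only that one.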
+        if test_key != picked_test:
+            pytest.skip(f"{test_key} is not the picked test.")
+
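+    # Build the xfel-calibrate command for the selected detector and type.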
+    cmd = ["xfel-calibrate", det_type, cal_type]
     cal_conf = val_dict["config"]
     report_name = f"{test_key}_{datetime.now().strftime('%y%m%d_%H%M%S')}"
     cal_conf["report-to"] = report_name
@@ -122,6 +148,7 @@ def test_xfel_calibrate(calibration_test):
 
     reference_folder = pathlib.Path(val_dict["reference-folder"].format(
         REFERENCE_FOLDER, cal_conf["karabo-id"], test_key))
+
     cmd += ["--slurm-name", test_key]
     f = io.StringIO()
 
@@ -149,6 +176,7 @@ def test_xfel_calibrate(calibration_test):
             slurm_watcher = False
         else:
             time.sleep(0.5)
+
     # 1st check that all jobs were COMPLETED without errors.
     states = res.stdout.decode().split("\n")[2:-1]
     assert all(s.strip() == "COMPLETED" for s in states), f"{test_key} failure, calibration jobs were not completed. {jobids}: {states}"  # noqa
@@ -164,6 +192,10 @@ def test_xfel_calibrate(calibration_test):
     assert len(h5files) == len(expected_h5files), f"{test_key} failure, number of files are not as expected."  # noqa
     print(f"{test_key}'s calibration h5files numbers are as expected.")
 
+    # Stop tests at this point, if desired.
+    if skip_numerical_validation:
+        return
+
     # 4th check that h5files are exactly the same as the reference h5files.
     all_files_validated = []
     non_valid_files = []