diff --git a/tests/test_webservice.py b/tests/test_webservice.py
index 8ce9afe6c0ee756e026fcbf92afe0ae6f265e5d5..87918d86444d4278099479d0b1d643c7766f6daf 100644
--- a/tests/test_webservice.py
+++ b/tests/test_webservice.py
@@ -12,6 +12,7 @@ import webservice  # noqa: import not at top of file
 from webservice.messages import MigrationError  # noqa: import not at top
 from webservice.webservice import (  # noqa: import not at top of file
     check_files,
+    check_run_type_skip,
     merge,
     parse_config,
     run_action,
@@ -379,3 +380,60 @@ async def test_get_slurm_nice_fails(fp):
 
     assert await get_slurm_nice('exfel', 'SPB', '202201') == 0
 
+@pytest.mark.parametrize(
+    'run_type, experiment_type_id, should_skip',
+    [
+        ('JF Darks', 11, True),
+        ('AGIPD dark HG', 11, True),
+        ('Darks JF1', 11, True),
+        ('JF1', 11, True),
+        ('Calibration - Dark LG', 11, True),
+        ('Custom dark experiment type', 11, True),
+        ('JF', 0, False),
+        ('Darks', 0, False),
+        ('something', 0, False),
+        ('Darkness experiment', 0, False),
+    ]
+)
+@pytest.mark.asyncio
+async def test_skip_runs(run_type: str, experiment_type_id: int, should_skip: bool):
+    res_run_by_id = mock.Mock()
+    res_run_by_id.status_code = 200
+    res_run_by_id.json = lambda: {
+        "data_groups_repositories": {"experiment": {"name": run_type, "id": 0}}
+    }
+
+    res_experiment_by_id = mock.Mock()
+    res_experiment_by_id.status_code = 200
+    res_experiment_by_id.json = lambda: {"experiment_type_id": experiment_type_id}
+
+    client = mock.Mock()
+    client.get_run_by_id_api = mock.Mock(return_value=res_run_by_id)
+    client.get_experiment_by_id_api = mock.Mock(return_value=res_experiment_by_id)
+
+    ret = await check_run_type_skip(client, "correct", 0)
+
+    assert ret == should_skip
+
+@pytest.mark.parametrize(
+    'return_value, status_code',
+    [
+        ({}, 200), ({}, 404), ({}, 500),
+    ]
+)
+@pytest.mark.asyncio
+async def test_skip_runs_exception(return_value, status_code, caplog):
+    caplog.set_level(logging.INFO)
+    response = mock.Mock()
+    response.status_code = status_code
+    response.json = lambda: return_value
+
+    client = mock.Mock()
+    client.get_run_by_id_api = mock.Mock(return_value=response)
+
+    ret = await check_run_type_skip(client, "correct", 0)
+
+    # If there is a key error, it should be caught and a warning logged instead
+    assert "run information does not contain expected key" in caplog.text
+    # And `False` should be returned
+    assert ret is False
diff --git a/webservice/config/webservice.yaml b/webservice/config/webservice.yaml
index f57b6431f3c78aebb539f4b9727a458985b4eaa3..78b13b61402a3a00770a4b952f9bbcdc385bebfc 100644
--- a/webservice/config/webservice.yaml
+++ b/webservice/config/webservice.yaml
@@ -36,6 +36,14 @@ correct:
   commissioning-penalty: 1250
   commissioning-max-age-days: 3
   job-penalty: 2
+  skip-run-types:
+    [
+      "AGIPD dark (LG|MG|HG)",
+      "Calibration - Dark (LG|MG|HG)",
+      "(Darks )?JF(0|1|2)",
+      "(Low|Medium|High) gain",
+      "(JF|LPD) Darks",
+    ]
   cmd: >-
     python -m xfel_calibrate.calibrate {detector} CORRECT
     --slurm-scheduling {sched_prio}
diff --git a/webservice/messages.py b/webservice/messages.py
index 540232e41ee5f130e8c3fca722a92befbb4c2587..4928545df757c3e8560e51eb714de4aca2ebdfb2 100644
--- a/webservice/messages.py
+++ b/webservice/messages.py
@@ -1,5 +1,6 @@
 class Errors:
     REQUEST_FAILED = "FAILED: request could not be parsed, please contact det-support@xfel.eu"
+    RUN_SKIPPED = "WARN: run at {} is marked to be skipped for calibration."
     REQUEST_MALFORMED = "FAILED: request {} is malformed, please contact det-support@xfel.eu"
     UNKNOWN_ACTION = "FAILED: action {} is not known!, please contact det-support@xfel.eu"
     PATH_NOT_FOUND = "FAILED: run at {} not found!, please contact det-support@xfel.eu"
diff --git a/webservice/webservice.py b/webservice/webservice.py
index 394db06c3a722279fd30a5f153f0bfc15ca39764..0afc90c59f931d5d544f4c13227c88b62cd938c8 100644
--- a/webservice/webservice.py
+++ b/webservice/webservice.py
@@ -9,6 +9,7 @@ import json
 import locale
 import logging
 import os
+import re
 import shlex
 import sqlite3
 import sys
@@ -617,6 +618,35 @@ async def get_slurm_partition(
     return partition
 
 
+async def check_run_type_skip(
+    mdc: MetadataClient,
+    action: str,
+    run_id: int,
+) -> bool:
+    loop = get_event_loop()
+
+    res = await shield(
+        loop.run_in_executor(None, mdc.get_run_by_id_api, run_id)
+    )
+
+    try:
+        run_type = res.json()["data_groups_repositories"]["experiment"]["name"]
+        if any(re.search(m, run_type) for m in config[action]["skip-run-types"]):
+            return True
+
+        experiment_id = res.json()["data_groups_repositories"]["experiment"]["id"]
+        res_experiment = await shield(
+            loop.run_in_executor(None, mdc.get_experiment_by_id_api, experiment_id)
+        )
+        # Experiment type id 11 is for dark experiments
+        if res_experiment.json()["experiment_type_id"] == 11:
+            return True
+    except KeyError as e:
+        logging.warning("mymdc run information does not contain expected key `%s`", e)
+
+    return False
+
+
 async def get_slurm_nice(partition: str, instrument: str,
                          cycle: Union[int, str], job_penalty: int = 2,
                          commissioning_penalty: int = 1250) -> int:
@@ -990,6 +1020,12 @@ class ActionsServer:
                 await update_mdc_status(self.mdc, 'correct', rid, msg)
                 return
 
+            if await check_run_type_skip(self.mdc, "correct", rid):
+                msg = Errors.RUN_SKIPPED.format(rpath)
+                logging.warning(msg)
+                await update_mdc_status(self.mdc, 'correct', rid, msg)
+                return
+
             ret, _ = await self.launch_jobs(
                 [runnr], req_id, detectors, 'correct', instrument, cycle,
                 proposal, request_time, rid