diff --git a/docs/source/testing.rst b/docs/source/testing.rst
index 82e4d3450cc5c1cfb34571b1e7d1532d41c96f1d..f5d4e74dfa54756a4d206abf60fa1ef35d03eaa5 100644
--- a/docs/source/testing.rst
+++ b/docs/source/testing.rst
@@ -25,7 +25,7 @@ To run all tests, navigate to the test directory and execute::
     python -m unittest discover
     
 This will usually entail executing a notebook under test via SLURM
-first, then checking its output against the last commited artifacts
+first, then checking its output against the last committed artefacts
 of that test type.
 
 If individual tests are run, e.g. for debugging, additional options
@@ -38,63 +38,63 @@ available for that test.
 
 If all tests pass, you can commit and push your updates. If you have
 failures, either check your changes, or if changes are intended,
-generate new artifacts.
+generate new artefacts.
 
 .. note::
 
     Running tests will generate entries for test reports in the
-    artifacts directory under the most recent commit.
+    artefacts directory under the most recent commit.
     Reviewers should check that such updates are present in the 
     list of changed files.
 
 
-Generating new Artifacts
+Generating new Artefacts
 ++++++++++++++++++++++++
 
 If an update intends to change output, the tests can be used to
-generate new artifacts against which subsequent tests will then run.
+generate new artefacts against which subsequent tests will then run.
 
-First, commit your changes which you want to produce new artifacts
+First, commit your changes which you want to produce new artefacts
 for::
 
     git add ...
     git commit -m "AGIPD corrections handle baseline shift"
 
-Contrary to running tests alone, new artifacts need to be generated
+In contrast to running tests alone, new artefacts need to be generated
 for each affected test individually::
 
     python test_XXX.py --generate
     
 replacing `test_XXX.py` with the test you'd like to run. This
-will execute the notebook, create artifact entries in the artifact
+will execute the notebook, create artefact entries in the artefact
 dir, and then check for consistency by executing the test against
-these artifacts. This last part is important: the test should not
+these artefacts. This last part is important: the test should not
 fail on its own input. If it does, something is very likely wrong!
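+
+For example, to generate artefacts for the AGIPD correction test
+(a sketch; the exact path depends on the commit you are on)::
+
+    python test_agipd.py --generate
+
+The artefacts then end up in a per-commit directory, i.e. something
+like `tests/artefacts/<commit-sha>/TestAGIPDCorrection_G/`, holding
+the checksum and histogram files, the `karabo.data` pickle and copies
+of the generated reports.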
 
-After artifacts are created and tests using these have passed,
-commit the new artifacts and create a merge request for your branch::
+After artefacts are created and tests using these have passed,
+commit the new artefacts and create a merge request for your branch::
 
-    git add tests/artifacts/
-    git commit -m "Added new artifacts for changes related to baseline shifts"
+    git add tests/artefacts/
+    git commit -m "Added new artefacts for changes related to baseline shifts"
     
-Please also add comments in the MR description on why artifacts have
+Please also add comments in the MR description on why artefacts have
 changed.
 
 .. note::
 
-    Reviewers should always question if a change in test artifacts
+    Reviewers should always question if a change in test artefacts
     is appropriate and intended.
     
 Test Reports
 ++++++++++++
 
 Test reports are automatically generated when building documentation
-from all xml report files found in sub-directories of the artifact
+from all xml report files found in sub-directories of the artefact
 directory.
 
 .. note::
 
     Please make sure to not commit any additional files into the
-    `test_rsts subfolder? of this documentation. Also, do not commit
-    `test_results.rst`; it is autogenerated.
+    `test_rsts` subfolder of this documentation. Also, do not commit
+    `test_results.rst`. It is autogenerated.
 
diff --git a/tests/correction_base.py b/tests/correction_base.py
index b1e2a1b1cbb3210df2505d6f7424d4d95bbaf7ba..022a11342dc415e26507468999283e8aac5cd9ea 100644
--- a/tests/correction_base.py
+++ b/tests/correction_base.py
@@ -22,15 +22,15 @@ np.warnings.filterwarnings('ignore')
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--generate', action="store_true", default=False,
-                    help="Set this flag to generate new artifacts from " +
-                    "the test. These will be placed in the artifact " +
+                    help="Set this flag to generate new artefacts from " +
+                    "the test. These will be placed in the artefact " +
                     "directory, under the latest commit your git " +
                     "repository is on. This will launch the " +
                     "notebook under test first.")
 parser.add_argument('--generate-wo-execution', action="store_true",
                     default=False, help="Set this flag to generate new " +
-                    "artifacts from the test. These will be placed in " +
-                    "the artifact directory, under the latest commit " +
+                    "artefacts from the test. These will be placed in " +
+                    "the artefact directory, under the latest commit " +
                     "your git repository is on. This will not launch " +
                     "the notebook being tested, but assumes it's " +
                     "output is already present. Use e.g. to debug tests.")
@@ -39,27 +39,27 @@ parser.add_argument('--test-wo-execution', action="store_true", default=False,
                     "tested first. This is assumes its output is already " +
                     "present. Use e.g. to debug tests.")
 parser.add_argument('--skip-checksum', action="store_true", default=False,
-                    help="Skip checksum tests (and artifact generation)")
+                    help="Skip checksum tests (and artefact generation)")
 parser.add_argument('--skip-histogram', action="store_true", default=False,
-                    help="Skip histogram tests (and artifact generation)")
+                    help="Skip histogram tests (and artefact generation)")
 parser.add_argument('--skip-karabo-data', action="store_true", default=False,
-                    help="Skip karabo_data tests (and artifact generation)")
+                    help="Skip karabo_data tests (and artefact generation)")
 parser.add_argument('--skip-report-gen', action="store_true", default=False,
                     help="Skip report generation tests")
-parser.add_argument('--artefact-dir', type=str, default="./artifacts/",
-                    help="Set directory to place artifacts in.")
+parser.add_argument('--artefact-dir', type=str, default="./artefacts/",
+                    help="Set directory to place artefacts in.")
 parser.add_argument('unittest_args', nargs='*',
                     help="Any arguments to be passed to unittest")
 args = parser.parse_args()
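+
+# Example invocations (illustrative), run from the tests directory:
+#   python test_agipd.py --generate
+#   python test_agipd.py --test-wo-execution --skip-checksum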
 
 
 class Failures(Enum):
-    ARTIFACT_MISSING = "No artifact"
-    EMPTY_ARTIFACT = "Empty artifact"
+    ARTEFACT_MISSING = "No artefact"
+    EMPTY_ARTEFACT = "Empty artefact"
 
 
 def _do_generate():
-    """ Determine if artifacts should be generated
+    """ Determine if artefacts should be generated
     """
     return args.generate or args.generate_wo_execution
 
@@ -72,8 +72,8 @@ def get_last_commit():
     return last_commit
 
 
-def get_artifact_dir(cls):
-    """ Get the artifact director for the last commit
+def get_artefact_dir(cls):
+    """ Get the artefact directory for the last commit
 
     :param cls: Test class
     """
@@ -84,12 +84,12 @@ def get_artifact_dir(cls):
     suffix = "G" if _do_generate() else "T"
     path = "{}/{}/{}_{}".format(art_base, last_commit,
                                 test_name, suffix)
-    print("Artifacts will be placed in: {}".format(path))
+    print("artefacts will be placed in: {}".format(path))
     return path
 
 
-def get_artifact_comp_dir(cls):
-    """Get the artifact director for the last commit that generated any
+def get_artefact_comp_dir(cls):
+    """Get the artefact directory for the last commit that generated any
 
     :param cls: Test class
     """
@@ -100,9 +100,9 @@ def get_artifact_comp_dir(cls):
         path = "{}/{}/{}_G".format(art_base, commit, test_name)
         if os.path.exists(path):
             return path
-    msg = ("Could not find any previous artifacts for test {}"
+    msg = ("Could not find any previous artefacts for test {}"
            + " in directory {}! Check if directory is correct"
-           + " or create artifacts using the --generate flag.")
+           + " or create artefacts using the --generate flag.")
     raise FileNotFoundError(msg.format(test_name, art_base))
 
 
@@ -135,8 +135,8 @@ def get_matching_h5_paths(f, template):
     return matches
 
 
-def parallel_hist_gen(hist_paths, artifact_dir, fname):
-    """ Function to generate histogram artifacts in parallel
+def parallel_hist_gen(hist_paths, artefact_dir, fname):
+    """ Function to generate histogram artefacts in parallel
 
     :param hist_paths: paths to create histograms for. Should be a dict with
         the following structure:
@@ -154,7 +154,7 @@ def parallel_hist_gen(hist_paths, artifact_dir, fname):
         histogram, and optionally, `scl_fun` is a scaling function to be
         executed on the data before creating the histogram.
 
-    :param artifact_dir: the directory under which to place histograms.
+    :param artefact_dir: the directory under which to place histograms.
         For each input file a histogram file (a npz archive) containing
         the named histograms defined by `hist_paths` is created.
     :param fname: path to the h5 file containing the paths specified in
@@ -182,13 +182,13 @@ def parallel_hist_gen(hist_paths, artifact_dir, fname):
                     all_hists["{}_{}".format(mpath, htype)] = h
                 del d
     hist_fname = "{}.hist".format(fname)
-    hpath = "{}/{}".format(artifact_dir, os.path.basename(hist_fname))
+    hpath = "{}/{}".format(artefact_dir, os.path.basename(hist_fname))
     if len(all_hists):
         np.savez(hpath, **all_hists)
 
 
 def parallel_hist_eval(hist_paths, cls, fname):
-    """ Function to compare histogram artifacts in parallel
+    """ Function to compare histogram artefacts in parallel
 
     :param hist_paths: paths to create histograms for. Should be a dict with
         the following structure:
@@ -216,16 +216,16 @@ def parallel_hist_eval(hist_paths, cls, fname):
     ret = []
     with h5py.File(fname, "r") as f:
         hist_fname = "{}.hist.npz".format(fname)
-        test_art_dir = get_artifact_comp_dir(cls)
+        test_art_dir = get_artefact_comp_dir(cls)
         hpath = "{}/{}".format(test_art_dir, os.path.basename(hist_fname))
-        has_artifact = os.path.exists(hpath)
-        if not has_artifact:
-            return Failures.ARTIFACT_MISSING
+        has_artefact = os.path.exists(hpath)
+        if not has_artefact:
+            return Failures.ARTEFACT_MISSING
 
         try:
             test_hists = np.load(hpath)
         except OSError:
-            return Failures.EMPTY_ARTIFACT  # likely empty data
+            return Failures.EMPTY_ARTEFACT  # likely empty data
 
         for path, hists in hist_paths.items():
             mpaths = get_matching_h5_paths(f, path)
@@ -257,8 +257,8 @@ class CorrectionTestBase:
     task = None
     parms = {}
     rel_file_ext = ".h5"
-    artifact_dir = None
-    artifact_comp_dir = None
+    artefact_dir = None
+    artefact_comp_dir = None
     hist_paths = []
     karabo_data_inspects = []
     expected_reports = []
@@ -268,7 +268,7 @@ class CorrectionTestBase:
         """
         Sets up the test by executing the notebook under test
 
-        If artifact generation is requested an artifact directory is
+        If artefact generation is requested an artefact directory is
         created.
 
         Note that this method will block until any slurm jobs scheduled
@@ -283,10 +283,10 @@ class CorrectionTestBase:
         print("Executing {}".format(" ".join(cmd)))
 
         if _do_generate():
-            print("Creating data paths for artifacts")
-            cls.artifact_dir = get_artifact_dir(cls)
-            if not os.path.exists(cls.artifact_dir):
-                os.makedirs(cls.artifact_dir)
+            print("Creating data paths for artefacts")
+            cls.artefact_dir = get_artefact_dir(cls)
+            if not os.path.exists(cls.artefact_dir):
+                os.makedirs(cls.artefact_dir)
 
         if args.generate_wo_execution or args.test_wo_execution:
             return
@@ -326,7 +326,7 @@ class CorrectionTestBase:
             sleep(10)
 
     @unittest.skipUnless(_do_generate() and not args.skip_checksum,
-                         "Artifact generation is not requested")
+                         "Artefact generation is not requested")
     def test_generate_checksums(self):
         """ Generate Fletcher32 checksums of output files from notebook
         """
@@ -345,7 +345,7 @@ class CorrectionTestBase:
                 f.visititems(visitor)
                 
                 chkfname = "{}.checksum".format(fname)
-                chkpath = "{}/{}".format(self.artifact_dir,
+                chkpath = "{}/{}".format(self.artefact_dir,
                                          os.path.basename(chkfname))
                 with open(chkpath, 'wb') as fc:
                     pickle.dump(d, fc, pickle.HIGHEST_PROTOCOL) 
@@ -353,7 +353,7 @@ class CorrectionTestBase:
     @unittest.skipIf(args.skip_checksum,
                      "User requested to skip checksum test")
     def test_checksums(self):
-        """ Compare Fletcher32 checksums of notebook's output with artifacts
+        """ Compare Fletcher32 checksums of notebook's output with artefacts
 
         This test will verify if datasets with checksums are identical. 
         Even for small changes in the correction logic this test is likely 
@@ -371,7 +371,7 @@ class CorrectionTestBase:
             "{}/*{}".format(out_folder, self.rel_file_ext))
         for fname in files_to_check:
             chkfname = "{}.checksum".format(fname)
-            test_art_dir = get_artifact_comp_dir(self.__class__)
+            test_art_dir = get_artefact_comp_dir(self.__class__)
             chkpath = "{}/{}".format(test_art_dir, os.path.basename(chkfname))
             with self.subTest(msg="Verifying against: {}".format(chkpath)):
                 self.assertTrue(os.path.exists(chkpath),
@@ -392,22 +392,22 @@ class CorrectionTestBase:
                     f.visititems(visitor)
 
     @unittest.skipUnless(_do_generate() and not args.skip_histogram,
-                         "Artifact generation is not requested")
+                         "Artefact generation is not requested")
     def test_generate_histograms(self):
-        """ Generate histogram artifacts for the output of the notebook
+        """ Generate histogram artefacts for the output of the notebook
         """
         out_folder = self._output_to_path()
         files_to_check = glob.glob(
             "{}/*{}".format(out_folder, self.rel_file_ext))
         with Pool(8) as p:
-            pf = partial(parallel_hist_gen, self.hist_paths, self.artifact_dir)
+            pf = partial(parallel_hist_gen, self.hist_paths, self.artefact_dir)
             p.map(pf, files_to_check)
         self.assertTrue(True)
 
     @unittest.skipIf(args.skip_histogram,
                      "User requested to skip histogram test")
     def test_histograms(self):
-        """ Compare histograms of notebook output with previous artifacts
+        """ Compare histograms of notebook output with previous artefacts
 
         Comparison is performed in multiple tests:
 
@@ -440,11 +440,11 @@ class CorrectionTestBase:
             for i, rvals in enumerate(r):
                 msg = "Verifying: {}".format(files_to_check[i])
                 with self.subTest(msg=msg):
-                    self.assertNotEqual(Failures.ARTIFACT_MISSING,
+                    self.assertNotEqual(rvals, Failures.ARTEFACT_MISSING,
                                         "No comparison histograms found")
-                if rvals is Failures.ARTIFACT_MISSING:
+                if rvals is Failures.ARTEFACT_MISSING:
                     return
-                if rvals is Failures.EMPTY_ARTIFACT:
+                if rvals is Failures.EMPTY_ARTEFACT:
                     return
                 else:
                     for rval in rvals:  # inner loop
@@ -481,15 +481,15 @@ class CorrectionTestBase:
                                 self.assertGreaterEqual(p, cl)
 
     @unittest.skipUnless(_do_generate() and not args.skip_karabo_data,
-                         "Artifact generation is not requested")
+                         "Artefact generation is not requested")
     def test_generate_karabo_data(self):
-        """ Generate artifacts for the Karabo Data test of notebook output
+        """ Generate artefacts for the Karabo Data test of notebook output
 
         Note that Karabo Data related imports are inline in this test so
         that other tests may be executed without Karabo data installed.
         """
         out_folder = self._output_to_path()
-        kdata = "{}/karabo.data".format(self.artifact_dir)
+        kdata = "{}/karabo.data".format(self.artefact_dir)
         # we inline the import to be able to skip if not installed
         import karabo_data as kd
         rd = kd.RunDirectory(out_folder)
@@ -542,12 +542,12 @@ class CorrectionTestBase:
         that other tests may be executed without Karabo data installed.
         """
         out_folder = self._output_to_path()
-        kdata = "{}/karabo.data".format(get_artifact_comp_dir(self.__class__))
+        kdata = "{}/karabo.data".format(get_artefact_comp_dir(self.__class__))
         # we inline the import to be able to skip if not installed
         import karabo_data as kd
         rd = kd.RunDirectory(out_folder)
 
-        # test against artifacts
+        # test against artefacts
         with open(kdata, 'rb') as f:
             d = pickle.load(f)
 
@@ -584,7 +584,7 @@ class CorrectionTestBase:
         """ Verify expected reports are generated
         
         Also verifies that no additional reports are present, and copies
-        the report to the artifact dir.
+        the report to the artefact dir.
         """
         out_folder = self._output_to_path()
         for report in self.expected_reports:
@@ -592,8 +592,8 @@ class CorrectionTestBase:
             with self.subTest(msg=msg):
                 rpath = "{}/{}".format(out_folder, report)
                 self.assertTrue(os.path.exists(rpath))
-                # copy report to artifacts
-                dpath = "{}/{}".format(get_artifact_dir(self.__class__),
+                # copy report to artefacts
+                dpath = "{}/{}".format(get_artefact_dir(self.__class__),
                                        os.path.basename(rpath))
                 shutil.copyfile(rpath, dpath)
         # verify no additional reports exist
diff --git a/tests/test_agipd.py b/tests/test_agipd.py
index edefe24927c7f6654decd07c97d0208700c0229e..68442968d959a9b1b98f6a2e925b837b1823c554 100644
--- a/tests/test_agipd.py
+++ b/tests/test_agipd.py
@@ -4,7 +4,7 @@ import unittest
 import numpy as np
 import xmlrunner
 
-from correction_base import CorrectionTestBase, args, get_artifact_dir
+from correction_base import CorrectionTestBase, args, get_artefact_dir
 
 
 class TestAGIPDCorrection(CorrectionTestBase, unittest.TestCase):
@@ -62,7 +62,7 @@ if __name__ == '__main__':
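+    # Sort so that the generate_* tests run before the comparison tests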
     ln = lambda f: "generate" not in f
     lncmp = lambda a, b: (ln(a) > ln(b)) - (ln(a) < ln(b))
     loader.sortTestMethodsUsing = lncmp
-    output = get_artifact_dir(TestAGIPDCorrection)
+    output = get_artefact_dir(TestAGIPDCorrection)
     unittest.main(testLoader=loader,
                   testRunner=xmlrunner.XMLTestRunner(output=output),
                   verbosity=3, failfast=False, buffer=False, catchbreak=False)
diff --git a/tests/test_lpd.py b/tests/test_lpd.py
index 2e4a1ab25e7c71c56c5ceb62d589399e15f0cbc6..fa17284f3904b6e1f7c1468b82e29533c442893e 100644
--- a/tests/test_lpd.py
+++ b/tests/test_lpd.py
@@ -4,7 +4,7 @@ import unittest
 import numpy as np
 import xmlrunner
 
-from correction_base import CorrectionTestBase, args, get_artifact_dir
+from correction_base import CorrectionTestBase, args, get_artefact_dir
 
 np.warnings.filterwarnings('ignore')
 
@@ -64,7 +64,7 @@ if __name__ == '__main__':
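+    # Sort so that the generate_* tests run before the comparison tests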
     ln = lambda f: "generate" not in f
     lncmp = lambda a, b: (ln(a) > ln(b)) - (ln(a) < ln(b))
     loader.sortTestMethodsUsing = lncmp
-    output = get_artifact_dir(TestLPDCorrection)
+    output = get_artefact_dir(TestLPDCorrection)
     unittest.main(testLoader=loader,
                   testRunner=xmlrunner.XMLTestRunner(output=output),
                   verbosity=3, failfast=False, buffer=False, catchbreak=False)