diff --git a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
index cd968ed1f16d1109ae4175e64337aa8d30bcf9f0..5573d26bb1ef0f499bbcf29b5b61e4fd302bf1b2 100644
--- a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
@@ -27,7 +27,7 @@
     "receiver_id = \"JNGFR{:02d}\"  # inset for receiver devices\n",
     "receiver_control_id = \"CONTROL\"  # inset for control devices\n",
     "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5'  # template to use for file name\n",
-    "h5path = '/INSTRUMENT/{}/DET/{}:daqOutput/data'  # path in H5 file under which images are located\n",
+    "h5path = '{}/DET/{}:daqOutput'  # path in H5 file under which images are located\n",
     "h5path_run = '/RUN/{}/DET/{}'  # path to run data\n",
     "h5path_cntrl = '/CONTROL/{}/DET/{}'  # path to control data\n",
     "karabo_id_control = \"\"  # if control is on a different ID, set to empty string if it is the same a karabo-id\n",
@@ -45,10 +45,11 @@
     "chunk_size_idim = 1  # chunking size of imaging dimension, adjust if user software is sensitive to this.\n",
     "integration_time = 4.96  # integration time in us, will be overwritten by value in file\n",
     "mem_cells = 0  # leave memory cells equal 0, as it is saved in control information starting 2019.\n",
-    "db_module = [\"Jungfrau_M275\"]  # ID of module in calibration database\n",
+    "db_module = [\"\"]  # ID of module in calibration database\n",
     "manual_slow_data = False  # if true, use manually entered bias_voltage and integration_time values\n",
     "chunk_size = 0\n",
     "\n",
+    "\n",
     "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n",
     "    from xfel_calibrate.calibrate import balance_sequences as bs\n",
     "    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)"
@@ -63,7 +64,9 @@
     "import copy\n",
     "import multiprocessing\n",
     "import time\n",
+    "import traceback\n",
     "import warnings\n",
+    "from extra_data import RunDirectory\n",
     "from functools import partial\n",
     "from pathlib import Path\n",
     "\n",
@@ -72,6 +75,10 @@
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
     "import tabulate\n",
+    "from IPython.display import Latex, Markdown, display\n",
+    "from matplotlib.colors import LogNorm\n",
+    "\n",
+    "from cal_tools import jungfraulib\n",
     "from cal_tools.enums import BadPixels\n",
     "from cal_tools.tools import (\n",
     "    get_constant_from_db_and_time,\n",
@@ -79,8 +86,6 @@
     "    map_modules_from_folder,\n",
     ")\n",
     "from iCalibrationDB import Conditions, Constants\n",
-    "from IPython.display import Latex, display\n",
-    "from matplotlib.colors import LogNorm\n",
     "\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
@@ -97,6 +102,7 @@
     "in_folder = Path(in_folder)\n",
     "out_folder = Path(out_folder)\n",
     "ped_dir = in_folder / f'r{run:04d}'\n",
+    "run_dir = RunDirectory(ped_dir)\n",
     "h5path = h5path.format(karabo_id, receiver_id)\n",
     "\n",
     "if out_folder.exists() and not overwrite:\n",
@@ -123,25 +129,6 @@
     "    karabo_id_control = karabo_id"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def check_memory_cells(file_name, path):\n",
-    "    with h5py.File(file_name, 'r') as f:\n",
-    "        t_stamp = np.array(f[path + '/storageCells/timestamp'])\n",
-    "        st_cells = np.array(f[path + '/storageCells/value'])\n",
-    "        sc_start = np.array(f[path + '/storageCellStart/value'])\n",
-    "\n",
-    "    valid_train = t_stamp > 0\n",
-    "    n_scs = st_cells[valid_train][0] + 1\n",
-    "    sc_s = sc_start[valid_train][0]\n",
-    "\n",
-    "    return n_scs, sc_s"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -169,7 +156,7 @@
     "\n",
     "# restore the queue\n",
     "mapped_files, mod_ids, total_sequences, sequences_qm, _ = map_modules_from_folder(\n",
-    "    in_folder, run, path_template, karabo_da, sequences\n",
+    "    in_folder, run, path_template, karabo_da, sequences, qm_naming=False\n",
     ")"
    ]
   },
@@ -179,31 +166,36 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "if not manual_slow_data:\n",
-    "    with h5py.File(fp_path_contr, 'r') as f:\n",
-    "        run_path = h5path_run.format(karabo_id_control, receiver_control_id)\n",
-    "        integration_time = float(f[f'{run_path}/exposureTime/value'][()]*1e6)\n",
-    "        bias_voltage = int(np.squeeze(f[f'{run_path}/vHighVoltage/value'])[0])\n",
-    "\n",
-    "\n",
-    "control_path = h5path_cntrl.format(karabo_id_control, receiver_control_id)\n",
+    "memory_cells = None\n",
     "try:\n",
-    "    this_run_mcells, sc_start = check_memory_cells(fp_path_contr, control_path)\n",
+    "    this_run_mcells, sc_start, integration_time, bias_voltage = jungfraulib.check_memory_cells(  # noqa\n",
+    "        run_dir,\n",
+    "        karabo_id_control,\n",
+    "        manual_slow_data,\n",
+    "        integration_time,\n",
+    "        bias_voltage,\n",
+    "    )\n",
     "    if this_run_mcells == 1:\n",
     "        memory_cells = 1\n",
-    "        print(f'Dark runs in single cell mode\\n storage cell start: {sc_start:02d}')\n",
+    "        print(\"Dark runs in single cell mode.\\n\"\n",
+    "              f\"Storage cell start: {sc_start:02d}\")\n",
     "    else:\n",
     "        memory_cells = 16\n",
-    "        print(f'Dark runs in burst mode\\n storage cell start: {sc_start:02d}')\n",
+    "        print(f\"Dark runs in burst mode.\\n\"\n",
+    "              f\"Storage cell start: {sc_start:02d}\")\n",
     "except Exception as e:\n",
     "    if \"Unable to open object\" in str(e):\n",
     "        if mem_cells==0:\n",
     "            memory_cells = 1\n",
     "        else:\n",
     "            memory_cells = mem_cells\n",
-    "        print(f'Set memory cells to {memory_cells} as it is not saved in control information.')\n",
+    "        print(f\"Set memory cells to {memory_cells}, as \"\n",
+    "              \"it is not saved in control information.\")\n",
     "    else:\n",
-    "        print(f\"Error trying to access memory cell from contol information: {e}\")\n",
+    "        display(Markdown(\n",
+    "            \"### <span style='color: #ff0000'>ERROR: </span> \"\n",
+    "            \"Accessing control information \"\n",
+    "            f\": {e}\"))\n",
     "\n",
     "print(f\"Integration time is {integration_time} us\")\n",
     "print(f\"Bias voltage is {bias_voltage} V\")\n",
@@ -242,9 +234,11 @@
     "        cal_db_interface=cal_db_interface,\n",
     "        creation_time=creation_time,\n",
     "        timeout=cal_db_timeout,\n",
+    "        print_once=False,\n",
     "    )\n",
     "    offset_map, when[\"Offset\"] = retrieval_function(\n",
-    "        constant=Constants.jungfrau.Offset(), empty_constant=np.zeros((1024, 512, 1, 3))\n",
+    "        constant=Constants.jungfrau.Offset(),\n",
+    "        empty_constant=np.zeros((1024, 512, 1, 3))\n",
     "    )\n",
     "    mask, when[\"BadPixelsDark\"] = retrieval_function(\n",
     "        constant=Constants.jungfrau.BadPixelsDark(),\n",
@@ -284,6 +278,7 @@
     "with multiprocessing.Pool() as pool:\n",
     "    r = pool.map(get_constants_for_module, karabo_da)\n",
     "\n",
+    "# Print timestamps for the retrieved constants.\n",
     "constants = {}\n",
     "for offset_map, mask, gain_map, k_da, when in r:\n",
     "    print(f'Constants for module {k_da}:')\n",
@@ -293,34 +288,8 @@
     "    if gain_map is None:\n",
     "        print(\"  No gain map found\")\n",
     "        no_relative_gain = True\n",
-    "        \n",
-    "    constants[k_da] = (offset_map, mask, gain_map)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def copy_and_sanitize_non_cal_data(infile, outfile, h5base):\n",
-    "    \"\"\" Copy and sanitize data in `infile` that is not touched by `correctLPD`\n",
-    "    \"\"\"\n",
-    "\n",
-    "    h5base = h5base.lstrip(\"/\")\n",
-    "    dont_copy = [\"adc\", ]\n",
-    "    dont_copy = [f'{h5base}/{dnc}' for dnc in dont_copy]\n",
     "\n",
-    "    def visitor(k, item):\n",
-    "        if k not in dont_copy:\n",
-    "            if isinstance(item, h5py.Group):\n",
-    "                outfile.create_group(k)\n",
-    "            elif isinstance(item, h5py.Dataset):\n",
-    "                group = str(k).split(\"/\")\n",
-    "                group = \"/\".join(group[:-1])\n",
-    "                infile.copy(k, outfile[group])\n",
-    "\n",
-    "    infile.visititems(visitor)"
+    "    constants[k_da] = (offset_map, mask, gain_map)"
    ]
   },
   {
@@ -330,7 +299,14 @@
    "outputs": [],
    "source": [
     "# Correct a chunk of images for offset and gain\n",
-    "def correct_chunk(offset_map, mask, gain_map, memory_cells, no_relative_gain, inp):\n",
+    "def correct_chunk(\n",
+    "    offset_map,\n",
+    "    mask, gain_map,\n",
+    "    memory_cells,\n",
+    "    no_relative_gain,\n",
+    "    inp\n",
+    "):\n",
+    "\n",
     "    fim_data = None\n",
     "    gim_data = None\n",
     "    rim_data = None\n",
@@ -338,11 +314,11 @@
     "    err = ''\n",
     "\n",
     "    try:\n",
-    "        d, g, m, ind, copy_sample = inp\n",
+    "        d, g, m, ind = inp\n",
     "        g[g==3] = 2\n",
     "\n",
-    "        if copy_sample and ind==0:\n",
-    "            if memory_cells==1:\n",
+    "        if ind == 0:\n",
+    "            if memory_cells == 1:\n",
     "                rim_data = np.squeeze(copy.copy(d))\n",
     "            else:\n",
     "                rim_data = np.squeeze(copy.copy(d[:,0,...]))\n",
@@ -357,23 +333,36 @@
     "            mask_cell = mask\n",
     "\n",
     "        # Offset correction\n",
-    "        offset = np.choose(g, (offset_map_cell[...,0], offset_map_cell[...,1], offset_map_cell[...,2]))\n",
+    "        offset = np.choose(\n",
+    "            g, (\n",
+    "                offset_map_cell[...,0],\n",
+    "                offset_map_cell[...,1],\n",
+    "                offset_map_cell[...,2],\n",
+    "            )\n",
+    "        )\n",
     "        d -= offset\n",
     "\n",
     "        # Gain correction\n",
     "        if not no_relative_gain:\n",
-    "            if memory_cells>1:\n",
+    "            if memory_cells > 1:\n",
     "                gain_map_cell = gain_map[m,...]\n",
     "            else:\n",
     "                gain_map_cell = gain_map\n",
-    "            cal = np.choose(g, (gain_map_cell[..., 0], gain_map_cell[..., 1], gain_map_cell[..., 2]))\n",
+    "            cal = np.choose(\n",
+    "                g, (\n",
+    "                    gain_map_cell[..., 0],\n",
+    "                    gain_map_cell[..., 1],\n",
+    "                    gain_map_cell[..., 2],\n",
+    "                )\n",
+    "            )\n",
     "            d /= cal      \n",
     "\n",
-    "        msk = np.choose(g, (mask_cell[...,0], mask_cell[...,1], mask_cell[...,2]))\n",
+    "        msk = np.choose(\n",
+    "            g, (mask_cell[...,0], mask_cell[...,1], mask_cell[...,2]))\n",
     "\n",
     "        # Store sample of data for plotting\n",
-    "        if copy_sample and ind==0:\n",
-    "            if memory_cells==1:\n",
+    "        if ind == 0:\n",
+    "            if memory_cells == 1:\n",
     "                fim_data = np.squeeze(copy.copy(d))\n",
     "                gim_data = np.squeeze(copy.copy(g))\n",
     "                msk_data = np.squeeze(copy.copy(msk))\n",
@@ -383,8 +372,9 @@
     "                msk_data = np.squeeze(copy.copy(msk[:,1,...]))\n",
     "\n",
     "    except Exception as e:\n",
-    "        err = e\n",
-    "\n",
+    "        display(Markdown(\n",
+    "            f\"### <span style='color: #ff0000'>ERROR: </span> : {traceback.format_exc()}\"))\n",
+    "        err = traceback.format_exc() + e\n",
     "    return ind, d, msk, rim_data, fim_data, gim_data, msk_data, err"
    ]
   },
@@ -402,87 +392,109 @@
     "# For each module, chunks will be processed by pool\n",
     "pool = multiprocessing.Pool()\n",
     "# Loop over modules\n",
-    "for local_karabo_da, mapped_files_module in zip(karabo_da, mapped_files.values()):\n",
+    "for local_karabo_da, mapped_files_module in mapped_files.items():\n",
     "    h5path_f = h5path.format(int(local_karabo_da[-2:]))\n",
-    "    # Loop over sequences for given module\n",
-    "    for sequence_file_number, sequence_file in enumerate(mapped_files_module.queue):\n",
-    "        sequence_file = Path(sequence_file)\n",
-    "        offset_map, mask, gain_map = constants[local_karabo_da]\n",
-    "                                 \n",
-    "        with h5py.File(sequence_file, 'r') as infile:\n",
-    "            # The processed files are saved here in a folder with the run name.\n",
-    "            out_filename = out_folder / sequence_file.name.replace(\"RAW\", \"CORR\")\n",
-    "            print(f'Process file: {sequence_file}, with path {h5path_f}')\n",
-    "            try:\n",
-    "                with h5py.File(out_filename, \"w\") as outfile:\n",
-    "                    copy_and_sanitize_non_cal_data(infile, outfile, h5path_f)\n",
-    "\n",
-    "                    oshape = infile[h5path_f+\"/adc\"].shape\n",
-    "                    print(f'Data shape: {oshape}')\n",
-    "                    if not oshape[0]:\n",
-    "                        raise ValueError(f\"No image data: shape {oshape}\")\n",
-    "                    # Chunk always contains >= 1 complete image\n",
-    "                    chunk_shape = (chunk_size_idim, 1) + oshape[-2:]\n",
-    "\n",
-    "                    ddset = outfile.create_dataset(h5path_f+\"/adc\",\n",
-    "                                                   oshape,\n",
-    "                                                   chunks=chunk_shape,\n",
-    "                                                   dtype=np.float32)\n",
-    "\n",
-    "                    mskset = outfile.create_dataset(h5path_f+\"/mask\",\n",
-    "                                                    oshape,\n",
-    "                                                    chunks=chunk_shape,\n",
-    "                                                    dtype=np.uint32,\n",
-    "                                                    compression=\"gzip\", compression_opts=1, shuffle=True)\n",
-    "                    # Parallelize over chunks of images\n",
-    "                    inp = []\n",
-    "                    max_ind = oshape[0]\n",
-    "                    ind = 0\n",
-    "\n",
-    "                    # If chunk size is not given maximum 12+1 chunks is expected\n",
-    "                    if chunk_size == 0:\n",
-    "                        chunk_size = max_ind//12\n",
-    "                        print(f'Chunk size: {chunk_size}')\n",
-    "\n",
-    "                    ts = time.time()\n",
-    "                    while ind<max_ind:\n",
-    "                        d = infile[h5path_f+\"/adc\"][ind:ind+chunk_size,...].astype(np.float32)\n",
-    "                        g = infile[h5path_f+\"/gain\"][ind:ind+chunk_size,...]\n",
-    "                        if h5path_f+\"/memoryCell\" in infile:\n",
-    "                            m = infile[h5path_f+\"/memoryCell\"][ind:ind+chunk_size,...]\n",
-    "                        else:\n",
-    "                            m = None\n",
-    "                        print(f'To process: {d.shape}')\n",
-    "                        inp.append((d, g, m, ind, sequence_file_number==0))\n",
-    "                        ind += chunk_size\n",
-    "\n",
-    "                    print('Preparation time: ', time.time() - ts)\n",
-    "                    ts = time.time()\n",
-    "\n",
-    "                    print(f'Run {len(inp)} processes')\n",
-    "                    p = partial(correct_chunk, offset_map, mask, gain_map, memory_cells, no_relative_gain)\n",
-    "\n",
-    "                    r = pool.map(p, inp)\n",
-    "                    \n",
-    "                    if sequence_file_number == 0:\n",
-    "                        (_,_,_,\n",
-    "                         rim_data[local_karabo_da], fim_data[local_karabo_da],\n",
-    "                         gim_data[local_karabo_da], msk_data[local_karabo_da], _) = r[0]\n",
-    "\n",
-    "                    print('Correction time: ', time.time() - ts)\n",
-    "                    ts = time.time()\n",
-    "\n",
-    "                    for rr in r:\n",
-    "                        ind, cdata, cmask, _,_,_,_, err = rr\n",
-    "                        data_size = cdata.shape[0]\n",
-    "                        ddset[ind:ind+data_size,...] = cdata\n",
-    "                        mskset[ind:ind+data_size,...] = cmask\n",
-    "                        if err != '':\n",
-    "                            print(f'Error: {err}')\n",
-    "\n",
-    "                    print('Saving time: ', time.time() - ts)\n",
-    "            except Exception as e:\n",
-    "                print(f\"Error: {e}\")\n",
+    "\n",
+    "    sequence_file = Path(sorted(list(mapped_files_module.queue))[0])\n",
+    "\n",
+    "    # The processed files are saved here in the out-folder\n",
+    "    # with the run name and the first sequence file.\n",
+    "    out_file = out_folder / sequence_file.name.replace(\"RAW\", \"CORR\")\n",
+    "    run_dir.select(\n",
+    "        f\"*{karabo_id}*{karabo_da}*\", \"*\").deselect(h5path_f, \"data.adc\").write(out_file)  # noqa\n",
+    "    print(f'Process file: {sequence_file}, with path {h5path_f}')\n",
+    "    offset_map, mask, gain_map = constants[local_karabo_da]\n",
+    "\n",
+    "    inp = []\n",
+    "    try:\n",
+    "        data = run_dir.get_array(h5path_f, \"data.adc\")\n",
+    "        dshape = data.shape\n",
+    "        gain = run_dir.get_array(h5path_f, \"data.gain\")\n",
+    "        print(f\"Data shape: {dshape}\")\n",
+    "\n",
+    "        n_imgs = dshape[0]\n",
+    "\n",
+    "        if not dshape[0]:\n",
+    "            raise ValueError(f\"No image data: data shape is {dshape}\")\n",
+    "\n",
+    "        # Chunk always contains >= 1 complete image.\n",
+    "        chunk_shape = (chunk_size_idim, 1) + dshape[-2:]\n",
+    "        # If chunk size is not given maximum 12+1 chunks is expected\n",
+    "        if chunk_size == 0:\n",
+    "            chunk_size = dshape[0] // 12\n",
+    "            print(f'Chunk size: {chunk_size}')\n",
+    "\n",
+    "        with h5py.File(out_file, \"r+\") as outfile:\n",
+    "\n",
+    "            ddset = outfile.create_dataset(\n",
+    "                \"INSTRUMENT/\"+h5path_f+\"/data/adc\",\n",
+    "                dshape,\n",
+    "                chunks=chunk_shape,\n",
+    "                dtype=np.float32,\n",
+    "            )\n",
+    "\n",
+    "            mskset = outfile.create_dataset(\n",
+    "                \"INSTRUMENT/\"+h5path_f+\"/data/mask\",\n",
+    "                dshape,\n",
+    "                chunks=chunk_shape,\n",
+    "                dtype=np.uint32,\n",
+    "                compression=\"gzip\",\n",
+    "                compression_opts=1,\n",
+    "                shuffle=True,\n",
+    "            )\n",
+    "\n",
+    "            ts = time.time()\n",
+    "            ind = 0\n",
+    "            for chunk in run_dir.select_trains(\n",
+    "                np.s_[:n_imgs]).split_trains(n_imgs//chunk_size):\n",
+    "                m = None\n",
+    "                d = chunk.get_array(h5path_f, \"data.adc\").astype(np.float32).values\n",
+    "                g = chunk.get_array(h5path_f, \"data.gain\").values\n",
+    "                #TODO: validate that memoryCell is an available key\n",
+    "                #if h5path_f+\"/memoryCell\" in infile:\n",
+    "                m = chunk.get_array(h5path_f, \"data.memoryCell\")\n",
+    "\n",
+    "                print(f'To process: {d.shape}')\n",
+    "                inp.append((d, g, m, ind))\n",
+    "                ind += d.shape[0]\n",
+    "\n",
+    "            print('Preparation time: ', time.time() - ts)\n",
+    "            ts = time.time()\n",
+    "\n",
+    "            print(f'Run {len(inp)} processes')\n",
+    "            p = partial(\n",
+    "                correct_chunk,\n",
+    "                offset_map,\n",
+    "                mask,\n",
+    "                gain_map,\n",
+    "                memory_cells,\n",
+    "                no_relative_gain,\n",
+    "            )\n",
+    "\n",
+    "            r = pool.map(p, inp)\n",
+    "\n",
+    "            (_,_,_,\n",
+    "             rim_data[local_karabo_da], fim_data[local_karabo_da],\n",
+    "             gim_data[local_karabo_da], msk_data[local_karabo_da], _) = r[0]\n",
+    "\n",
+    "            print('Correction time: ', time.time() - ts)\n",
+    "            ts = time.time()\n",
+    "\n",
+    "            for rr in r:\n",
+    "                ind, cdata, cmask, _,_,_,_, err = rr\n",
+    "                if err != '':\n",
+    "                    print(f'Error: {err}')\n",
+    "                data_size = cdata.shape[0]\n",
+    "                ddset[ind:ind+data_size,...] = cdata\n",
+    "                mskset[ind:ind+data_size,...] = cmask\n",
+    "\n",
+    "\n",
+    "            print('Saving time: ', time.time() - ts)\n",
+    "    except Exception as e:\n",
+    "        display(Markdown(\n",
+    "            \"### <span style='color: #ff0000'>ERROR: </span> : \"\n",
+    "            f\"{traceback.format_exc()}\"\n",
+    "        ))\n",
     "pool.close()"
    ]
   },
diff --git a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
index 6d52e311f37e9513f06ac2b589bc71e00e292907..d628865ec133286667a4bb070c1a9cacd68ad7b8 100644
--- a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
@@ -84,8 +84,9 @@
     ")\n",
     "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
     "from XFELDetAna.plotting.histogram import histPlot\n",
-    "from cal_tools.ana_tools import save_dict_to_hdf5\n",
     "from XFELDetAna.util import env\n",
+    "from cal_tools import jungfraulib\n",
+    "from cal_tools.ana_tools import save_dict_to_hdf5\n",
     "from cal_tools.enums import BadPixels\n",
     "from cal_tools.tools import (\n",
     "    get_dir_creation_date,\n",
@@ -197,8 +198,13 @@
     "\n",
     "        run_dir = RunDirectory(f\"{in_folder}/r{r_n:04d}/\")\n",
     "\n",
-    "        (run_mcells, sc_start, integration_time, bias_voltage) = check_memory_cells(\n",
-    "            run_dir, karabo_id_control, manual_slow_data, integration_time, bias_voltage)\n",
+    "        run_mcells, sc_start, integration_time, bias_voltage = jungfraulib.check_memory_cells(  # noqa\n",
+    "            run_dir,\n",
+    "            karabo_id_control,\n",
+    "            manual_slow_data,\n",
+    "            integration_time,\n",
+    "            bias_voltage,\n",
+    "        )\n",
     "\n",
     "        if mod not in noise_map.keys():\n",
     "\n",
diff --git a/src/cal_tools/jungfraulib.py b/src/cal_tools/jungfraulib.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ba86dcac4192e11b64813a6963b7dde2d5eb054
--- /dev/null
+++ b/src/cal_tools/jungfraulib.py
@@ -0,0 +1,28 @@
+from typing import Tuple
+
+
+def check_memory_cells(
+    run_dir: "extra_data.reader.DataCollection",
+    karabo_id_control: str,
+    manual_slow_data: bool,
+    integration_time: float,
+    bias_voltage: int,
+) -> Tuple[int, int, float, int]:
+    """Read slow data from RUN source.
+    :param run_dir: EXtra-data RunDirectory DataCollection object.
+    :param karabo_id_control: Karabo ID for control h5file with slow data.
+    :param use_slow_data: A flag to overwrite bias_voltage and
+                          integration_time with stored slow data values. 
+    :param integration_time: Initial integration_time value.
+    :param bias_voltage: Initial bias_voltage value.
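+
+    Usage sketch (run path and Karabo ID below are illustrative only):
+        run_dir = RunDirectory("/gpfs/exfel/exp/FXE/202101/p002732/raw/r0012")
+        n_cells, sc_start, t_int, hv = check_memory_cells(
+            run_dir, "FXE_XAD_JF1M", False, 4.96, 180)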
+    """
+    ctrl_src = f"{karabo_id_control}/DET/CONTROL"
+    n_scs = int(run_dir.get_run_value(ctrl_src, "storageCells.value")) + 1
+    sc_s = int(run_dir.get_run_value(ctrl_src, "storageCellStart.value"))
+    if not manual_slow_data:
+        integration_time = float(run_dir.get_run_value(
+            ctrl_src, "exposureTime.value")) * 1e6
+        bias_voltage = int(run_dir.get_run_value(
+            ctrl_src, "vHighVoltage.value")[0])
+
+    return n_scs, sc_s, integration_time, bias_voltage
diff --git a/src/cal_tools/tools.py b/src/cal_tools/tools.py
index d13d97691bc6ab85afc246bb063f554f2e606fe3..5a02a4b5b7e8f1060a83aeeccfff2bcd394fb4e3 100644
--- a/src/cal_tools/tools.py
+++ b/src/cal_tools/tools.py
@@ -57,7 +57,7 @@ def run_prop_seq_from_path(filename):
 
 
 def map_modules_from_folder(in_folder, run, path_template, karabo_da,
-                            sequences=None):
+                            sequences=None, qm_naming=True):
     """
     Prepare queues of files to process.
     Queues are stored in dictionary with module name Q{}M{} as a key
@@ -67,7 +67,9 @@ def map_modules_from_folder(in_folder, run, path_template, karabo_da,
     :param path_template: Template for file name
                           e.g. `RAW-R{:04d}-{}-S{:05d}.h5`
     :param karabo_da: List of data aggregators e.g. [AGIPD00, AGIPD01]
-    :param sequences: List of sequences to be considered
+    :param sequences: List of sequences to be considered.
+    :param qm_naming: Flag to use the Q{}M{} naming convention for dict
+                      keys instead of the karabo-da names.
     :return: Dictionary of queues of files, dictionary of module indexes,
     total number of sequences, dictionary of number of sequences per module
     """
@@ -78,7 +80,10 @@ def map_modules_from_folder(in_folder, run, path_template, karabo_da,
     sequences_qm = {}
     for inset in karabo_da:
         module_idx = int(inset[-2:])
-        name = module_index_to_qm(module_idx)
+        if qm_naming:
+            name = module_index_to_qm(module_idx)
+        else:
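+            # Use the data aggregator name itself (e.g. "JNGFR01") as the
+            # key, since the Jungfrau notebooks iterate over karabo_da
+            # names directly.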
+            name = inset
         module_files[name] = Queue()
         sequences_qm[name] = 0
         mod_ids[name] = module_idx