diff --git a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
index 05bda3f95cba5ca42228762483d597f03863ae37..144ce000930afe71d5a8c0cb1f6721d71ddacb50 100644
--- a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
@@ -43,8 +43,8 @@
     "badpixel_threshold_sigma = 5.  # bad pixels defined by values outside n times this std from median\n",
     "offset_abs_threshold_low = [1000, 10000, 10000]  # absolute bad pixel threshold in terms of offset, lower values\n",
     "offset_abs_threshold_high = [8000, 15000, 15000]  # absolute bad pixel threshold in terms of offset, upper values\n",
-    "max_trains = 0  # Maximum trains to process darks. Set to 0 to process all available train images.\n",
-    "min_trains = 1  # Minimum number of trains that should be available to process dark constants. Default 1.\n",
+    "max_trains = 1000  # Maximum trains to process darks. Set to 0 to process all available train images. 1000 trains is enough resolution to create the dark constants\n",
+    "min_trains = 100  # Minimum number of trains to process dark constants. Raise a warning if the run has fewer trains.\n",
     "manual_slow_data = False  # if true, use manually entered bias_voltage and integration_time values\n",
     "time_limits = 0.025  # to find calibration constants later on, the integration time is allowed to vary by 0.5 us\n",
     "\n",
@@ -71,10 +71,9 @@
    },
    "outputs": [],
    "source": [
-    "import glob\n",
     "import os\n",
     "import warnings\n",
-    "from pathlib import Path\n",
+    "from logging import warning\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
     "import matplotlib\n",
@@ -92,7 +91,6 @@
     "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
     "from XFELDetAna.plotting.histogram import histPlot\n",
     "from cal_tools import jungfraulib, step_timing\n",
-    "from cal_tools.ana_tools import save_dict_to_hdf5\n",
     "from cal_tools.enums import BadPixels, JungfrauGainMode\n",
     "from cal_tools.tools import (\n",
     "    get_dir_creation_date,\n",
@@ -199,11 +197,11 @@
     "\n",
     "# A transperent workaround for old raw data with wrong/missing medium and low settings\n",
     "if med_low_settings == [None, None]:\n",
-    "    print(\"WARNING: run.settings is not stored in the data to read. \"\n",
-    "          f\"Hence assuming gain_mode = {gain_mode} for adaptive old data.\")\n",
+    "    warning(\"run.settings is not stored in the data to read. \"\n",
+    "            f\"Hence assuming gain_mode = {gain_mode} for adaptive old data.\")\n",
     "elif med_low_settings == [\"dynamicgain\", \"forceswitchg1\"]:\n",
-    "    print(f\"WARNING: run.settings for medium and low gain runs are wrong {med_low_settings}. \"\n",
-    "          f\"This is an expected bug for old raw data. Setting gain_mode to {gain_mode}.\")\n",
+    "    warning(f\"run.settings for medium and low gain runs are wrong {med_low_settings}. \"\n",
+    "            f\"This is an expected bug for old raw data. Setting gain_mode to {gain_mode}.\")\n",
     "# Validate that low_med_settings is not a mix of adaptive and fixed settings.\n",
     "elif not (sorted(med_low_settings) in [fixed_settings, dynamic_settings, old_fixed_settings]):  # noqa\n",
     "    raise ValueError(\n",
@@ -310,7 +308,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "context = psh.context.ThreadContext(num_workers=multiprocessing.cpu_count())"
+    "context = psh.context.ThreadContext(num_workers=memory_cells)"
    ]
   },
   {
@@ -343,7 +341,8 @@
     "\n",
     "    print(f\"\\n- Instrument data path for {mod} is {instrument_src}.\")\n",
     "\n",
-    "    offset_map[mod] = context.alloc(shape=(sensor_size+(memory_cells, 3)), fill=0)\n",
+    "    offset_map[mod] = context.alloc(\n",
+    "        shape=(sensor_size+(memory_cells, 3)), fill=0, dtype=np.float32)\n",
     "    noise_map[mod] = context.alloc(like=offset_map[mod], fill=0)\n",
     "    bad_pixels_map[mod] = context.alloc(like=offset_map[mod], dtype=np.uint32, fill=0)\n",
     "\n",
@@ -351,46 +350,53 @@
     "\n",
     "        def process_cell(worker_id, array_index, cell_number):\n",
     "            cell_slice_idx = acelltable == cell_number\n",
-    "            thiscell = images[..., cell_slice_idx]\n",
+    "            thiscell = images[..., cell_slice_idx]  # [1024, 512, n_trains]\n",
     "\n",
     "            # Identify cells/trains with images of 0 pixels.\n",
     "            # TODO: An investigation is ongoing by DET to identify reason for these empty images.\n",
-    "            nonzero_adc = np.any(thiscell != 0 , axis=(0, 1))\n",
+    "            nonzero_adc = np.any(thiscell != 0 , axis=(0, 1))  # [n_trains]\n",
     "\n",
     "            # Exclude empty images with 0 pixels, before calculating offset and noise\n",
     "            thiscell = thiscell[..., nonzero_adc]\n",
-    "            offset_map[mod][..., cell_number, gain] = np.mean(thiscell, axis=2)\n",
-    "            noise_map[mod][..., cell_number, gain] = np.std(thiscell, axis=2)\n",
-    "\n",
+    "            offset_map[mod][..., cell_number, gain] = np.mean(  # [1024, 512]\n",
+    "                thiscell, axis=2, dtype=np.float32)\n",
+    "            noise_map[mod][..., cell_number, gain] = np.std(  # [1024, 512]\n",
+    "                thiscell, axis=2, dtype=np.float32)\n",
+    "            del thiscell\n",
     "            # Check if there are wrong bad gain values.\n",
     "            # 1. Exclude empty images.\n",
     "            # 2. Indicate pixels with wrong gain value for any train for each cell.\n",
     "            # TODO: mean is used to use thresholds for accepting gain values, even if not 0 mean value.\n",
-    "            gain_avg = np.mean(\n",
-    "                gain_vals[..., cell_slice_idx][..., nonzero_adc], axis=2)\n",
+    "            gain_avg = np.mean(  # [1024, 512]\n",
+    "                gain_vals[..., cell_slice_idx][..., nonzero_adc],\n",
+    "                axis=2, dtype=np.float32\n",
+    "            )\n",
     "\n",
+    "            # [1024, 512]\n",
     "            bad_pixels_map[mod][..., cell_number, gain][gain_avg != raw_g] |= BadPixels.WRONG_GAIN_VALUE.value\n",
+    "\n",
     "        print(f\"Gain stage {gain}, run {run_n}\")\n",
     "\n",
     "        # load shape of data for memory cells, and detector size (imgs, cells, x, y)\n",
-    "        n_imgs = run_dc[instrument_src, \"data.adc\"].shape[0]\n",
+    "        n_trains = run_dc[instrument_src, \"data.adc\"].shape[0]\n",
     "        # load number of data available, including trains with empty data.\n",
-    "        n_trains = len(run_dc.train_ids)\n",
+    "        all_trains = len(run_dc.train_ids)\n",
     "        instr_dc = run_dc.select(instrument_src, require_all=True)\n",
-    "        empty_trains = n_trains - n_imgs\n",
+    "        empty_trains = all_trains - n_trains\n",
     "        if empty_trains != 0:\n",
-    "            print(f\"\\tWARNING: {mod} has {empty_trains} trains with empty data out of {n_trains} trains\")  # noqa\n",
+    "            print(f\"{mod} has {empty_trains} empty trains out of {all_trains} trains\")\n",
     "        if max_trains > 0:\n",
-    "            n_imgs = min(n_imgs, max_trains)\n",
-    "        print(f\"Processing {n_imgs} images.\")\n",
-    "        # Select only requested number of images to process darks.\n",
-    "        instr_dc = instr_dc.select_trains(np.s_[:n_imgs])\n",
+    "            n_trains = min(n_trains, max_trains)\n",
+    "        print(f\"Processing {n_trains} images.\")\n",
+    "\n",
+    "        if n_trains == 0:\n",
+    "            raise ValueError(f\"{run_n} has no trains to process.\")\n",
     "\n",
-    "        if n_imgs < min_trains:\n",
-    "            raise ValueError(\n",
-    "                f\"Less than {min_trains} trains are available in RAW data.\"\n",
-    "                \" Not enough data to process darks.\")\n",
+    "        if n_trains < min_trains:\n",
+    "            warning(f\"Less than {min_trains} trains are available in RAW data.\")\n",
     "\n",
+    "        # Select only requested number of images to process darks.\n",
+    "        instr_dc = instr_dc.select_trains(np.s_[:n_trains])\n",
     "        images = np.transpose(\n",
     "            instr_dc[instrument_src, \"data.adc\"].ndarray(), (3, 2, 1, 0))\n",
     "        acelltable = np.transpose(instr_dc[instrument_src, \"data.memoryCell\"].ndarray())\n",
@@ -413,7 +419,9 @@
     "\n",
     "        # Calculate offset and noise maps\n",
     "        context.map(process_cell, range(memory_cells))\n",
-    "\n",
+    "        del images\n",
+    "        del acelltable\n",
+    "        del gain_vals\n",
     "    step_timer.done_step(f'Creating Offset and noise constants for a module.')"
    ]
   },