diff --git a/notebooks/DynamicFF/Characterize_DynamicFF_NBC.ipynb b/notebooks/DynamicFF/Characterize_DynamicFF_NBC.ipynb
index 41cb215e2afdd024dc3f4078735cab84fec1416d..d9e512d47cff537bf2008816eacd48675a550571 100644
--- a/notebooks/DynamicFF/Characterize_DynamicFF_NBC.ipynb
+++ b/notebooks/DynamicFF/Characterize_DynamicFF_NBC.ipynb
@@ -33,8 +33,10 @@
     "cal_db_timeout = 30000  # Unused, calibration DB timeout\n",
     "db_output = False # if True, the notebook sends dark constants to the calibration database\n",
     "local_output = True # if True, the notebook saves dark constants locally\n",
+    "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HH:MM:SS.00 e.g. 2019-07-04 11:02:41.00\n",
     "\n",
     "# Calibration constants parameters\n",
+    "frame_range = [4]  # range list [start, end, step] of frame indices within a train to use for characterization.\n",
     "n_components = 50  # Number of principal components of flat-field to compute (default: 50)"
    ]
   },
@@ -63,9 +65,8 @@
     "%matplotlib inline\n",
     "from cal_tools.step_timing import StepTimer\n",
     "from cal_tools.tools import (\n",
-    "    get_dir_creation_date,\n",
     "    run_prop_seq_from_path,\n",
-    "    save_dict_to_hdf5\n",
+    "    calcat_creation_time,\n",
     ")\n",
     "from cal_tools.restful_config import calibration_client, extra_calibration_client\n",
     "from cal_tools.shimadzu import ShimadzuHPVX2\n",
@@ -84,9 +85,11 @@
     "extra_calibration_client()  # Configure CalibrationData.\n",
     "\n",
     "cc = calibration_client()\n",
-    "pdus = cc.get_all_phy_det_units_from_detector(\n",
-    "    {\"detector_identifier\": karabo_id})  # TODO: Use creation_time for snapshot_at\n",
-    "\n",
+    "pdus = cc.get_all_phy_det_units_from_detector({\n",
+    "    \"detector_identifier\": karabo_id,\n",
+    "    \"pdu_snapshot_at\": calcat_creation_time(\n",
+    "        in_folder, min(run_low, run_high), creation_time),\n",
+    "})\n",
     "if not pdus[\"success\"]:\n",
     "    raise ValueError(\"Failed to retrieve PDUs\")\n",
     "\n",
@@ -116,6 +119,10 @@
     "\n",
     "constants = {}\n",
     "\n",
+    "# make a frame slice\n",
+    "frame_range += [None] * (3 - len(frame_range))\n",
+    "frame_slice = slice(*frame_range)\n",
+    "\n",
     "step_timer = StepTimer()"
    ]
   },
@@ -133,7 +140,7 @@
    "outputs": [],
    "source": [
     "dark_run = run_high\n",
-    "dark_creation_time = get_dir_creation_date(in_folder, dark_run)\n",
+    "dark_creation_time = calcat_creation_time(in_folder, dark_run, creation_time)\n",
     "print(f\"Using {dark_creation_time} as creation time of Offset constant.\")\n",
     "\n",
     "for da, meta in modules.items():\n",
@@ -155,10 +162,10 @@
     "    conditions = detector.conditions(dark_dc, meta[\"module\"])\n",
     "\n",
     "    key_data = dark_dc[source_name, image_key]\n",
-    "    images_dark = key_data.ndarray()\n",
+    "    images_dark = key_data.ndarray(roi=frame_slice)\n",
     "    ntrain, npulse, ny, nx = images_dark.shape\n",
     "\n",
-    "    print(f\"N image: {ntrain * npulse} (ntrain: {ntrain}, npulse: {npulse})\")\n",
+    "    print(f\"N image: {ntrain * npulse} (ntrain: {ntrain}, npulse: {npulse}/{key_data.shape[1]})\")\n",
     "    print(f\"Image size: {ny} x {nx} px\")\n",
     "    step_timer.done_step(\"Read dark images\")\n",
     "\n",
@@ -192,13 +199,11 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "flat_run = run_low\n",
-    "flat_creation_time = get_dir_creation_date(in_folder, flat_run)\n",
+    "flat_creation_time = calcat_creation_time(in_folder, flat_run, creation_time)\n",
     "print(f\"Using {flat_creation_time} as creation time of DynamicFF constant.\")\n",
     "\n",
     "for da, meta in modules.items():\n",
@@ -227,10 +232,10 @@
     "                         f\"the dark run conditions ({dark_conditions}). Skip flat-field characterization.\")\n",
     "\n",
     "    key_data = flat_dc[source_name][image_key]\n",
-    "    images_flat = key_data.ndarray()\n",
+    "    images_flat = key_data.ndarray(roi=frame_slice)\n",
     "    ntrain, npulse, ny, nx = images_flat.shape\n",
     "\n",
-    "    print(f\"N image: {ntrain * npulse} (ntrain: {ntrain}, npulse: {npulse})\")\n",
+    "    print(f\"N image: {ntrain * npulse} (ntrain: {ntrain}, npulse: {npulse}/{key_data.shape[1]})\")\n",
     "    print(f\"Image size: {ny} x {nx} px\")\n",
     "    step_timer.done_step(\"Read flat-field images\")\n",
     "\n",
@@ -346,7 +351,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.11"
+   "version": "3.11.9"
   }
  },
  "nbformat": 4,
diff --git a/notebooks/DynamicFF/Correct_DynamicFF_NBC.ipynb b/notebooks/DynamicFF/Correct_DynamicFF_NBC.ipynb
index 885a87ab790d8c85b6044932baf50a0d514a15f8..db6e15736331d1f5fe64f98ef1fd7f2934dfc3b8 100644
--- a/notebooks/DynamicFF/Correct_DynamicFF_NBC.ipynb
+++ b/notebooks/DynamicFF/Correct_DynamicFF_NBC.ipynb
@@ -29,6 +29,7 @@
     "# Database access parameters.\n",
     "cal_db_interface = \"tcp://max-exfl-cal001:8021\"  # Unused, calibration DB interface to use\n",
     "cal_db_timeout = 30000  # Unused, calibration DB timeout\n",
+    "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HH:MM:SS.00 e.g. 2019-07-04 11:02:41.00\n",
     "\n",
     "# Correction parameters\n",
     "n_components = 20  # number of principal components of flat-field to use in correction\n",
@@ -44,6 +45,7 @@
    "outputs": [],
    "source": [
     "import os\n",
+    "import sys\n",
     "import h5py\n",
     "import warnings\n",
     "from logging import warning\n",
@@ -60,7 +62,7 @@
     "%matplotlib inline\n",
     "from cal_tools.step_timing import StepTimer\n",
     "from cal_tools.files import sequence_trains, DataFile\n",
-    "from cal_tools.tools import get_dir_creation_date\n",
+    "from cal_tools.tools import calcat_creation_time\n",
     "\n",
     "from cal_tools.restful_config import calibration_client, extra_calibration_client\n",
     "from cal_tools.calcat_interface2 import CalibrationData\n",
@@ -68,7 +70,7 @@
     "\n",
     "from dynflatfield import (\n",
     "    DynamicFlatFieldCorrectionCython as DynamicFlatFieldCorrection,\n",
-    "    FlatFieldCorrectionFileProcessor\n",
+    "    FileDynamicFlatFieldProcessor\n",
     ")\n",
     "from dynflatfield.draw import plot_images, plot_camera_image"
    ]
@@ -79,15 +81,16 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "creation_time = get_dir_creation_date(in_folder, run)\n",
+    "creation_time = calcat_creation_time(in_folder, run, creation_time)\n",
     "print(f\"Creation time is {creation_time}\")\n",
     "\n",
     "extra_calibration_client()  # Configure CalibrationData API.\n",
     "\n",
     "cc = calibration_client()\n",
-    "pdus = cc.get_all_phy_det_units_from_detector(\n",
-    "    {\"detector_identifier\": karabo_id})  # TODO: Use creation_time for snapshot_at\n",
-    "\n",
+    "pdus = cc.get_all_phy_det_units_from_detector({\n",
+    "    \"detector_identifier\": karabo_id,\n",
+    "    \"pdu_snapshot_at\": creation_time,\n",
+    "})\n",
     "if not pdus[\"success\"]:\n",
     "    raise ValueError(\"Failed to retrieve PDUs\")\n",
     "\n",
@@ -110,7 +113,7 @@
     "    instrument_source_name = detector.instrument_source(module)\n",
     "    corrected_source_name = detector.corrected_source(module)\n",
     "    print('-', da, db_module, module, instrument_source_name)\n",
-    "    \n",
+    "\n",
     "    modules[da] = dict(\n",
     "        db_module=db_module,\n",
     "        module=module,\n",
@@ -145,6 +148,8 @@
     "aggregators = {}\n",
     "corrections = {}\n",
     "for da in modules:\n",
+    "    file_da, _, _ = da.partition('/')\n",
+    "    aggregators.setdefault(file_da, []).append(da)\n",
     "    try:\n",
     "        dark = caldata[\"Offset\", da].ndarray()\n",
     "        flat = caldata[\"DynamicFF\", da].ndarray()\n",
@@ -156,12 +161,9 @@
     "            dark, flat, components, downsample_factors)\n",
     "\n",
     "        corrections[da] = dffc\n",
-    "        \n",
-    "        file_da, _, _ = da.partition('/')\n",
-    "        aggregators.setdefault(file_da, []).append(da)\n",
     "    except (KeyError, FileNotFoundError):\n",
-    "        warning(f\"Constants are not found for module {da}. \"\n",
-    "                \"The module will not calibrated\")\n",
+    "        # missed constants are reported later\n",
+    "        pass\n",
     "\n",
     "step_timer.done_step(\"Load calibration constants\")        "
    ]
@@ -182,7 +184,8 @@
     "# Output Folder Creation:\n",
     "os.makedirs(out_folder, exist_ok=True)\n",
     "\n",
-    "report = []\n",
+    "missed_constants = 0\n",
+    "report = {}\n",
     "for file_da, file_modules in aggregators.items():\n",
     "    dc = RunDirectory(f\"{in_folder}/r{run:04d}\", f\"RAW-R{run:04d}-{file_da}-S*.h5\")\n",
     "\n",
@@ -191,17 +194,27 @@
     "    process_modules = []\n",
     "    for da in file_modules:\n",
     "        instrument_source = modules[da][\"raw_source_name\"]\n",
-    "        if instrument_source in dc.all_sources:\n",
-    "            keydata = dc[instrument_source][image_key].drop_empty_trains()\n",
-    "            train_ids.update(keydata.train_ids)\n",
-    "            process_modules.append(da)\n",
-    "        else:\n",
+    "        if instrument_source not in dc.all_sources:\n",
     "            print(f\"Source {instrument_source} for module {da} is missed\")\n",
+    "            continue\n",
+    "        if da not in corrections:\n",
+    "            missed_constants += 1\n",
+    "            warning(f\"Constants are not found for module {da}. \"\n",
+    "                    \"The module will not calibrated\")\n",
+    "            continue\n",
+    "\n",
+    "        keydata = dc[instrument_source][image_key].drop_empty_trains()\n",
+    "        train_ids.update(keydata.train_ids)\n",
+    "        process_modules.append(da)\n",
+    "\n",
+    "    if not process_modules:\n",
+    "        continue\n",
     "        \n",
     "    train_ids = np.array(sorted(train_ids))\n",
     "    ts = dc.select_trains(by_id[train_ids]).train_timestamps().astype(np.uint64)\n",
     "\n",
     "    # correct and write sequence files\n",
+    "    seq_report = {}\n",
     "    for seq_id, train_mask in sequence_trains(train_ids, 200):\n",
     "        step_timer.start()\n",
     "        print('* sequence', seq_id)\n",
@@ -219,7 +232,6 @@
     "        f.create_index(seq_train_ids, timestamps=seq_timestamps)\n",
     "\n",
     "        # create file structure\n",
-    "        seq_report = {}\n",
     "        file_datasets = {}\n",
     "        for da in process_modules:\n",
     "            instrument_source = modules[da][\"raw_source_name\"]\n",
@@ -257,7 +269,7 @@
     "\n",
     "            dffc = corrections[da]\n",
     "            instrument_source = modules[da][\"raw_source_name\"]\n",
-    "            proc = FlatFieldCorrectionFileProcessor(dffc, num_proc, instrument_source, image_key)\n",
+    "            proc = FileDynamicFlatFieldProcessor(dffc, num_proc, instrument_source, image_key)\n",
     "\n",
     "            proc.start_workers()\n",
     "            proc.run(dc_seq)\n",
@@ -272,11 +284,22 @@
     "                keydata, ds = file_datasets[da][key]\n",
     "                ds[:] = keydata.ndarray()\n",
     "\n",
-    "            seq_report[da] = (raw_images[0, 0], corrected_images[:20, 0])\n",
+    "            rep_cix = np.argmax(np.mean(raw_images[:20], axis=(-1, -2)), axis=1)\n",
+    "            rep_rix = rep_cix[0]\n",
+    "            da_report = seq_report.setdefault(da, [])\n",
+    "            da_report.append((raw_images[0, rep_rix],\n",
+    "                              corrected_images[range(len(rep_cix)), rep_cix]))\n",
     "            step_timer.done_step(\"Correct flat-field\")\n",
     "\n",
     "        f.close()\n",
-    "        report.append(seq_report)"
+    "    report.update(seq_report)\n",
+    "\n",
+    "if not report:\n",
+    "    if missed_constants:\n",
+    "        raise ValueError(\"Calibration constants are not found for any module\")\n",
+    "    else:\n",
+    "        warning(\"No data to correct\")\n",
+    "        sys.exit(0)"
    ]
   },
   {
@@ -287,15 +310,23 @@
    "source": [
     "step_timer.start()\n",
     "if report:\n",
-    "    for da, (raw_image, corrected_images) in report[0].items():\n",
+    "    for da, da_report in report.items():\n",
+    "        if len(da_report) > 0:\n",
+    "            raw_images, corrected_images = zip(*da_report)\n",
+    "            raw_images = np.stack(raw_images)\n",
+    "            corrected_images = np.concatenate(corrected_images, axis=0)\n",
+    "        else:\n",
+    "            raw_images, corrected_images = da_report\n",
+    "            raw_images = raw_images[None, ...]\n",
+    "\n",
     "        source = modules[da][\"raw_source_name\"]\n",
     "        display(Markdown(f\"## {source}\"))\n",
     "\n",
-    "        display(Markdown(\"### The first raw image\"))\n",
-    "        plot_camera_image(raw_images[0, 0])\n",
+    "        display(Markdown(\"### The brightest raw image from the first train\"))\n",
+    "        plot_camera_image(raw_images[0])\n",
     "        plt.show()\n",
     "\n",
-    "        display(Markdown(\"### The first corrected image\"))\n",
+    "        display(Markdown(\"### The brightest corrected image from the first train\"))\n",
     "        plot_camera_image(corrected_images[0])\n",
     "        plt.show()\n",
     "\n",
@@ -309,7 +340,7 @@
     "                    np.full(\n",
     "                        (min_images - corrected_images.shape[0], *corrected_images.shape[1:]),\n",
     "                        fill_value=np.nan)])\n",
-    "        display(Markdown(\"### The first corrected images in the trains (up to 20)\"))\n",
+    "        display(Markdown(\"### The brightest corrected images by one from a train (up to 20 first trains)\"))\n",
     "        plot_images(corrected_images, figsize=(13, 8))\n",
     "        plt.show()\n",
     "\n",
@@ -329,7 +360,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
@@ -343,7 +374,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.11"
+   "version": "3.11.7"
   }
  },
  "nbformat": 4,
diff --git a/setup.py b/setup.py
index f0a67b6268c1b407656fc448ac55cebaca9aad27..b8ce52d6d54df2a7be36b59fbd90f74a2c4ee64a 100644
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ install_requires = [
         "dill==0.3.8",
         "docutils==0.20.1",
         "dynaconf==3.2.4",
-        "dynflatfield==1.0.0",
+        "dynflatfield==1.1.0",
         "env_cache==0.1",
         "extra_data==1.16.0",
         "extra_geom==1.11.0",