From e4672d891a8555b3911dabbc897e461b65c391a1 Mon Sep 17 00:00:00 2001
From: ahmedk <karim.ahmed@xfel.eu>
Date: Thu, 11 Jul 2024 10:41:04 +0200
Subject: [PATCH] refactor: rename outp_source to instr_src_group

---
 notebooks/ePix100/Correction_ePix100_NBC.ipynb | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/notebooks/ePix100/Correction_ePix100_NBC.ipynb b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
index 318de79a5..4017881ca 100644
--- a/notebooks/ePix100/Correction_ePix100_NBC.ipynb
+++ b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
@@ -615,10 +615,10 @@
     "            instrument_channels=sorted({f'{output_src}/data',f'{input_src}/data'})\n",
     "        )\n",
     "        # Create Instrument section to later add corrected datasets.\n",
-    "        outp_source = ofile.create_instrument_source(output_src)\n",
+    "        instr_src_group = ofile.create_instrument_source(output_src)\n",
     "\n",
     "        # Create count/first datasets at INDEX source.\n",
-    "        outp_source.create_index(data=image_counts)\n",
+    "        instr_src_group.create_index(data=image_counts)\n",
     "\n",
     "        image_raw_fields = [  # /data/image/\n",
     "            \"binning\", \"bitsPerPixel\", \"dimTypes\", \"dims\",\n",
@@ -627,18 +627,18 @@
     "        for field in image_raw_fields:\n",
     "            field_arr = seq_dc[input_src, f\"data.image.{field}\"].ndarray()\n",
     "\n",
-    "            outp_source.create_key(\n",
+    "            instr_src_group.create_key(\n",
     "                f\"data.image.{field}\", data=field_arr,\n",
     "                chunks=(chunk_size_idim, *field_arr.shape[1:]))\n",
     "\n",
     "        # Add main corrected `data.image.pixels` dataset and store corrected data.\n",
-    "        outp_source.create_key(\n",
+    "        instr_src_group.create_key(\n",
     "            \"data.image.pixels\", data=data, chunks=dataset_chunk)\n",
-    "        outp_source.create_key(\n",
+    "        instr_src_group.create_key(\n",
     "            \"data.trainId\", data=seq_dc.train_ids, chunks=min(50, len(seq_dc.train_ids)))\n",
     "        \n",
     "        if np.isin('data.pulseId', list(seq_dc[input_src].keys())): # some runs are missing 'data.pulseId'\n",
-    "            outp_source.create_key(\n",
+    "            instr_src_group.create_key(\n",
     "                \"data.pulseId\",\n",
     "                data=list(seq_dc[input_src]['data.pulseId'].ndarray()[:, 0]),\n",
     "                chunks=min(50, len(seq_dc.train_ids)),\n",
@@ -646,9 +646,9 @@
     "        \n",
     "        if pattern_classification:\n",
     "            # Add main corrected `data.image.pixels` dataset and store corrected data.\n",
-    "            outp_source.create_key(\n",
+    "            instr_src_group.create_key(\n",
     "                \"data.image.pixels_classified\", data=data_clu, chunks=dataset_chunk)\n",
-    "            outp_source.create_key(\n",
+    "            instr_src_group.create_key(\n",
     "                \"data.image.patterns\", data=data_patterns, chunks=dataset_chunk)\n",
     "\n",
     "        if output_src != input_src:\n",
-- 
GitLab