From ebfd38605d861856308d43ef41556daa2a544911 Mon Sep 17 00:00:00 2001
From: ahmedk <karim.ahmed@xfel.eu>
Date: Thu, 20 Oct 2022 11:09:08 +0200
Subject: [PATCH] Apply style changes and replace data_path with outp_source.name

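Use double quotes consistently for strings in the Jungfrau correction
notebook, and take the gain/mask dataset paths from the created
instrument source (outp_source.name) instead of rebuilding the
INSTRUMENT/<source>/data string by hand. The constant step-timer
message also loses its unnecessary f-prefix. In files.py, move the
closing parenthesis of one of the dataSources create_dataset calls
onto its own line.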
---
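Note: the point of the data_path change is that create_instrument_source
returns a group-like h5py object, and h5py groups already know their
absolute path via .name, so the INSTRUMENT path string no longer needs
to be concatenated manually. A minimal sketch of the idea, using plain
h5py and a made-up source name (the real notebook uses the detector's
Karabo source and cal_tools' DataFile wrapper):

    import h5py

    with h5py.File("example.h5", "w") as outp_file:
        # Any h5py group knows its own absolute path via .name.
        outp_source = outp_file.create_group(
            "INSTRUMENT/HYPOTHETICAL_DET/DET/JNGFR01:daqOutput")
        # -> "/INSTRUMENT/HYPOTHETICAL_DET/DET/JNGFR01:daqOutput/data/gain"
        gain_path = f"{outp_source.name}/data/gain"
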
 ...Jungfrau_Gain_Correct_and_Verify_NBC.ipynb | 19 +++++++++----------
 src/cal_tools/files.py                        |  3 ++-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
index 30f66d5be..c25b72495 100644
--- a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb
@@ -538,7 +538,7 @@
     "\n",
     "        # Correct data per train\n",
     "        context.map(correct_train, data)\n",
-    "        step_timer.done_step(f'Correction time.')\n",
+    "        step_timer.done_step(f\"Correction time.\")\n",
     "\n",
     "        step_timer.start()\n",
     "\n",
@@ -550,12 +550,12 @@
     "            # Create INDEX datasets.\n",
     "            outp_file.create_index(\n",
     "                train_ids=seq_dc.train_ids,\n",
-    "                timestamps=seq_dc.files[0].file['INDEX/timestamp'][sel_trains],\n",
+    "                timestamps=seq_dc.files[0].file[\"INDEX/timestamp\"][sel_trains],\n",
     "                flags=seq_dc.files[0].validity_flag[sel_trains])\n",
     "\n",
     "            # Create METDATA datasets\n",
     "            outp_file.create_metadata(\n",
-    "                like=seq_dc, instrument_channels=(f'{instrument_src_kda}/data',))\n",
+    "                like=seq_dc, instrument_channels=(f\"{instrument_src_kda}/data\",))\n",
     "\n",
     "            # Create Instrument section to later add corrected datasets.\n",
     "            outp_source = outp_file.create_instrument_source(instrument_src_kda)\n",
@@ -566,21 +566,20 @@
     "            # RAW memoryCell and frameNumber are not corrected. But we are storing only\n",
     "            # the values for the corrected trains. \n",
     "            outp_source.create_key(\n",
-    "                'data.memoryCell', data=memcells,\n",
+    "                \"data.memoryCell\", data=memcells,\n",
     "                chunks=(min(chunks_ids, memcells.shape[0]), 1))\n",
     "            outp_source.create_key(\n",
-    "                'data.frameNumber', data=frame_number,\n",
+    "                \"data.frameNumber\", data=frame_number,\n",
     "                chunks=(min(chunks_ids, frame_number.shape[0]), 1))\n",
     "            # Add main corrected `data.adc`` dataset and store corrected data.\n",
     "            outp_source.create_key(\n",
-    "                'data.adc', data=data_corr,\n",
+    "                \"data.adc\", data=data_corr,\n",
     "                chunks=(min(chunks_data, data_corr.shape[0]), *dshape[1:]))\n",
-    "            # Only store gain values for the corrected trains.\n",
-    "            data_path = \"INSTRUMENT/\" + instrument_src_kda + \"/data\"\n",
+    "\n",
     "            write_compressed_frames(\n",
-    "                gain, outp_file, f'{data_path}/gain', comp_threads=8)\n",
+    "                gain, outp_file, f\"{outp_source.name}/data/gain\", comp_threads=8)\n",
     "            write_compressed_frames(\n",
-    "                mask_corr, outp_file, f'{data_path}/mask', comp_threads=8)\n",
+    "                mask_corr, outp_file, f\"{outp_source.name}/data/mask\", comp_threads=8)\n",
     "\n",
     "            save_reduced_rois(outp_file, data_corr, mask_corr, local_karabo_da)\n",
     "\n",
diff --git a/src/cal_tools/files.py b/src/cal_tools/files.py
index 35c4f3cd6..674f44ad7 100644
--- a/src/cal_tools/files.py
+++ b/src/cal_tools/files.py
@@ -330,7 +330,8 @@ class DataFile(h5py.File):
                                 shape=data_sources_shape,
                                 data=[name.encode('ascii')
                                       for name in source_names],
-                                maxshape=(None,))
+                                maxshape=(None,),
+                               )
         md_group.create_dataset('dataSources/root', shape=data_sources_shape,
                                 data=[sources[name].encode('ascii')
                                       for name in source_names],
-- 
GitLab