diff --git a/notebooks/Gotthard2/Characterize_Darks_Gotthard2_NBC.ipynb b/notebooks/Gotthard2/Characterize_Darks_Gotthard2_NBC.ipynb
index fda112e5d0e08a1edafe0023adab63a805b8f710..6e667a75668866251978934b081c417a649c930a 100644
--- a/notebooks/Gotthard2/Characterize_Darks_Gotthard2_NBC.ipynb
+++ b/notebooks/Gotthard2/Characterize_Darks_Gotthard2_NBC.ipynb
@@ -31,8 +31,9 @@
     "\n",
     "# Parameters used to access raw data.\n",
     "karabo_id = \"FXE_XAD_G2XES\" # karabo prefix of Gotthard-II devices\n",
-    "karabo_da = [\"GH201\"] # data aggregators\n",
-    "receiver_template = \"RECEIVER\" # receiver template used to read INSTRUMENT keys.\n",
+    "karabo_da = [\"\"] # data aggregators\n",
+    "receiver_template = \"RECEIVER{}\" # receiver template used to read INSTRUMENT keys.\n",
+    "receiver_affixes = [\"\"] # Affixes to format into the receiver template to load the correct receiver names from the data.\n",
     "control_template = \"CONTROL\" # control template used to read CONTROL keys.\n",
     "ctrl_source_template = '{}/DET/{}' # template for control source name (filled with karabo_id_control)\n",
     "karabo_id_control = \"\" # Control karabo ID. Set to empty string to use the karabo-id\n",
@@ -127,8 +128,8 @@
    "source": [
     "run_dc = RunDirectory(in_folder / f\"r{run_high:04d}\")\n",
     "file_loc = f\"proposal:{run_dc.run_metadata()['proposalNumber']} runs:{run_high} {run_med} {run_low}\" # noqa\n",
-    "\n",
-    "receivers = sorted(list(run_dc.select(f'{karabo_id}/DET/{receiver_template}*').all_sources))"
+    "receiver_names = [f\"*{receiver_template.format(x)}*\" for x in receiver_affixes]\n",
+    "data_sources = list(run_dc.select(receiver_names).all_sources)"
    ]
   },
   {
@@ -233,6 +234,8 @@
    "source": [
     "def specify_trains_to_process(\n",
     "    img_key_data: \"extra_data.KeyData\",\n",
+    "    run_num: int,\n",
+    "    src: str,\n",
     "):\n",
     "    \"\"\"Specify total number of trains to process.\n",
     "    Based on given min_trains and max_trains, if given.\n",
@@ -244,7 +247,7 @@
     "    n_trains = img_key_data.shape[0]\n",
     "    all_trains = len(img_key_data.train_ids)\n",
     "    print(\n",
-    "        f\"{receiver} has {all_trains - n_trains} \"\n",
+    "        f\"{src} for run {run_num} has {all_trains - n_trains} \"\n",
     "        f\"trains with empty frames out of {all_trains} trains\"\n",
     "    )\n",
     "\n",
@@ -289,7 +292,7 @@
     "def convert_train(wid, index, tid, d):\n",
     "    \"\"\"Convert a Gotthard2 train from 12bit to 10bit.\"\"\"\n",
     "    gotthard2algs.convert_to_10bit(\n",
-    "        d[receiver][\"data.adc\"], lut, data_10bit[index, ...]\n",
+    "        d[src][\"data.adc\"], lut, data_10bit[index, ...]\n",
     "    )"
    ]
   },
   {
@@ -310,7 +313,7 @@
     "    np.uint16\n",
     ")\n",
     "empty_lut = np.stack(1280 * [np.stack([empty_lut] * 2)], axis=0)\n",
-    "for mod, receiver in zip(karabo_da, receivers):\n",
+    "for mod, src in zip(karabo_da, data_sources):\n",
     "\n",
     "    # Retrieve LUT constant\n",
     "    lut, time = get_constant_from_db_and_time(\n",
@@ -334,10 +337,10 @@
     "\n",
     "    for run_num, [gain, run_dc] in run_dcs_dict.items():\n",
     "        step_timer.start()\n",
-    "        n_trains = specify_trains_to_process(run_dc[receiver, \"data.adc\"])\n",
+    "        n_trains = specify_trains_to_process(run_dc[src, \"data.adc\"], run_num, src)\n",
     "\n",
     "        # Select requested number of trains to process.\n",
-    "        dc = run_dc.select(receiver, require_all=True).select_trains(\n",
+    "        dc = run_dc.select(src, require_all=True).select_trains(\n",
     "            np.s_[:n_trains]\n",
     "        )\n",
     "\n",
@@ -346,7 +349,7 @@
     "        step_timer.start()\n",
     "        # Convert 12bit data to 10bit\n",
     "        data_10bit = context.alloc(\n",
-    "            shape=dc[receiver, \"data.adc\"].shape, dtype=np.float32\n",
+    "            shape=dc[src, \"data.adc\"].shape, dtype=np.float32\n",
     "        )\n",
     "        context.map(convert_train, dc)\n",
     "        step_timer.done_step(\"convert to 10bit\")\n",
@@ -374,7 +377,7 @@
     "        context.map(offset_noise_cell, (even_data, odd_data))\n",
     "\n",
     "        # Split even and odd gain data.\n",
-    "        data_gain = dc[receiver, \"data.gain\"].ndarray()\n",
+    "        data_gain = dc[src, \"data.gain\"].ndarray()\n",
     "        even_gain = data_gain[:, 20::2, :]\n",
     "        odd_gain = data_gain[:, 21::2, :]\n",
     "        raw_g = 3 if gain == 2 else gain\n",
diff --git a/notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb b/notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb
index aff16f80037d928e20ea830865d763ffa636d065..62853ba7de5b05b6a03f249c6e7d8df7e7fa59f1 100644
--- a/notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb
+++ b/notebooks/Gotthard2/Correction_Gotthard2_NBC.ipynb
@@ -60,11 +60,13 @@
     "# Parameters used to access raw data.\n",
     "karabo_id = \"DETLAB_25UM_GH2\" # karabo prefix of Gotthard-II devices\n",
     "karabo_da = [\"\"] # data aggregators\n",
-    "receiver_template = \"RECEIVER\" # receiver template used to read INSTRUMENT keys.\n",
+    "receiver_template = \"RECEIVER{}\" # receiver template used to read INSTRUMENT keys.\n",
+    "receiver_affixes = [\"\"] # Affixes to format into the receiver template to load the correct receiver names from the data.\n",
     "control_template = \"CONTROL\" # control template used to read CONTROL keys.\n",
     "ctrl_source_template = \"{}/DET/{}\" # template for control source name (filled with karabo_id_control)\n",
     "karabo_id_control = \"\" # Control karabo ID. Set to empty string to use the karabo-id\n",
-    "corr_data_source = \"{}/CORR/{}:daqOutput\" # Correction data source. filled with karabo_id and correction receiver\n",
+    "corr_source_template = \"{}/CORR/{}:daqOutput\" # Correction data source template, filled with karabo_id and correction receiver\n",
+    "corr_receiver = \"\" # Receiver name for the corrected data. Leave empty to use the same receiver name for 50um GH2, or the first (master) receiver for 25um GH2.\n",
     "\n",
     "# Parameters for calibration database.\n",
     "cal_db_interface = \"tcp://max-exfl-cal001:8016#8025\" # the database interface to use.\n",
@@ -234,11 +236,16 @@
     "print(f\"Process modules: {db_modules} for run {run}\")\n",
     "\n",
     "# Create the correction receiver name.\n",
-    "receivers = sorted(list(run_dc.select(f'{karabo_id}/DET/{receiver_template}*').all_sources))\n",
-    "if gh2_detector == \"25um\": # For 25um use virtual karabo_das for CALCAT data mapping.\n",
-    "    corr_receiver = receivers[0].split(\"/\")[-1].split(\":\")[0][:-2]\n",
-    "else:\n",
-    "    corr_receiver = receivers[0].split(\"/\")[-1].split(\":\")[0]"
+    "receiver_names = [f\"*{receiver_template.format(x)}*\" for x in receiver_affixes]\n",
+    "data_sources = list(run_dc.select(receiver_names).all_sources)\n",
+    "\n",
+    "if not corr_receiver:\n",
+    "    # This part assumes the data_source structure: '{karabo_id}/DET/{receiver_name}:{output_channel}'\n",
+    "    if gh2_detector == \"25um\": # For 25um use virtual karabo_das for CALCAT data mapping.\n",
+    "        corr_receiver = data_sources[0].split(\"/\")[-1].split(\":\")[0][:-2]\n",
+    "    else:\n",
+    "        corr_receiver = data_sources[0].split(\"/\")[-1].split(\":\")[0]\n",
+    "    print(f\"Using {corr_receiver} as the receiver name for the corrected data.\")"
    ]
   },
   {
@@ -399,30 +406,30 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "corr_data_source = corr_data_source.format(karabo_id, corr_receiver)\n",
+    "corr_data_source = corr_source_template.format(karabo_id, corr_receiver)\n",
     "\n",
     "for raw_file in seq_files:\n",
     "\n",
     "    out_file = out_folder / raw_file.name.replace(\"RAW\", \"CORR\")\n",
     "    # Select module INSTRUMENT sources and deselect empty trains.\n",
-    "    dc = H5File(raw_file).select(receivers, require_all=True)\n",
+    "    dc = H5File(raw_file).select(data_sources, require_all=True)\n",
     "\n",
     "    n_trains = len(dc.train_ids)\n",
     "\n",
     "    # Initialize GH2 data and gain arrays to store in corrected files.\n",
     "    if gh2_detector == \"25um\":\n",
-    "        data_stored = np.zeros((dc[receivers[0], \"data.adc\"].shape[:2] + (1280 * 2,)), dtype=np.float32)\n",
-    "        gain_stored = np.zeros((dc[receivers[0], \"data.adc\"].shape[:2] + (1280 * 2,)), dtype=np.uint8)\n",
+    "        data_stored = np.zeros((dc[data_sources[0], \"data.adc\"].shape[:2] + (1280 * 2,)), dtype=np.float32)\n",
+    "        gain_stored = np.zeros((dc[data_sources[0], \"data.adc\"].shape[:2] + (1280 * 2,)), dtype=np.uint8)\n",
     "    else:\n",
     "        data_stored = None\n",
     "        gain_stored = None\n",
     "\n",
-    "    for i, (receiver, mod) in enumerate(zip(receivers, karabo_da)):\n",
+    "    for i, (src, mod) in enumerate(zip(data_sources, karabo_da)):\n",
     "        step_timer.start()\n",
-    "        print(f\"Correcting {receiver} for {raw_file}\")\n",
+    "        print(f\"Correcting {src} for {raw_file}\")\n",
     "\n",
-    "        data = dc[receiver, \"data.adc\"].ndarray()\n",
-    "        gain = dc[receiver, \"data.gain\"].ndarray()\n",
+    "        data = dc[src, \"data.adc\"].ndarray()\n",
+    "        gain = dc[src, \"data.gain\"].ndarray()\n",
     "        step_timer.done_step(\"Preparing raw data\")\n",
     "        dshape = data.shape\n",
     "\n",
@@ -441,7 +448,7 @@
     "        data_corr[np.isinf(data_corr)] = np.nan\n",
     "\n",
     "        # Create CORR files and add corrected data sections.\n",
-    "        image_counts = dc[receiver, \"data.adc\"].data_counts(labelled=False)\n",
+    "        image_counts = dc[src, \"data.adc\"].data_counts(labelled=False)\n",
     "\n",
     "        if gh2_detector == \"25um\":\n",
     "            data_stored[..., i::2] = data_corr.copy()\n",
@@ -485,7 +492,7 @@
     "        # stored in the corrected file.\n",
     "        for field in [\"bunchId\", \"memoryCell\", \"frameNumber\", \"timestamp\"]:\n",
     "            outp_source.create_key(\n",
-    "                f\"data.{field}\", data=dc[receiver, f\"data.{field}\"].ndarray(),\n",
+    "                f\"data.{field}\", data=dc[src, f\"data.{field}\"].ndarray(),\n",
     "                chunks=(chunks_data, data_corr.shape[1])\n",
     "            )\n",
     "        outp_source.create_compressed_key(f\"data.mask\", data=mask)\n",
@@ -537,9 +544,9 @@
     "    mod_dcs[corr_data_source][\"train_raw_data\"] = np.zeros((data_corr.shape[1], 1280 * 2), dtype=np.float32)\n",
     "    mod_dcs[corr_data_source][\"train_raw_gain\"] = np.zeros((data_corr.shape[1], 1280 * 2), dtype=np.uint8)\n",
     "\n",
-    "for i, rec_mod in enumerate(receivers):\n",
+    "for i, src in enumerate(data_sources):\n",
     "    with H5File(first_seq_raw) as in_dc:\n",
-    "        train_dict = in_dc.train_from_id(tid)[1][rec_mod]\n",
+    "        train_dict = in_dc.train_from_id(tid)[1][src]\n",
     "    if gh2_detector == \"25um\":\n",
     "        mod_dcs[corr_data_source][\"train_raw_data\"][..., i::2] = train_dict[\"data.adc\"]\n",
     "        mod_dcs[corr_data_source][\"train_raw_gain\"][..., i::2] = train_dict[\"data.gain\"]\n",
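
Both notebooks now build their source selection from `receiver_template` plus `receiver_affixes`, and the 25um correction path interleaves the two modules into a single 2560-pixel axis with `[..., i::2]`. A minimal sketch of these two ideas, using hypothetical affix values (`"_MASTER"`, `"_SLAVE"`) and toy NumPy arrays rather than real run data:

```python
import numpy as np

# Hypothetical parameter values; the real ones are notebook parameters.
receiver_template = "RECEIVER{}"
receiver_affixes = ["_MASTER", "_SLAVE"]

# Glob patterns used to pick the INSTRUMENT sources, as in the notebooks.
receiver_names = [f"*{receiver_template.format(x)}*" for x in receiver_affixes]
print(receiver_names)  # ['*RECEIVER_MASTER*', '*RECEIVER_SLAVE*']

# Toy stand-ins for two modules' corrected data: (trains, frames, 1280 pixels) each.
n_trains, n_frames = 2, 4
module_data = [
    np.full((n_trains, n_frames, 1280), fill, dtype=np.float32)
    for fill in (0.0, 1.0)
]

# 25um GH2: interleave both modules pixel-wise into one 2560-pixel axis,
# mirroring `data_stored[..., i::2] = data_corr.copy()` in the correction loop.
data_stored = np.zeros((n_trains, n_frames, 1280 * 2), dtype=np.float32)
for i, data_corr in enumerate(module_data):
    data_stored[..., i::2] = data_corr

# Even output pixels come from the first module, odd pixels from the second.
assert data_stored[0, 0, 0] == 0.0 and data_stored[0, 0, 1] == 1.0
```

The same even/odd split is applied to `train_raw_data` and `train_raw_gain` in the plotting cell at the end of the correction notebook.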