{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Shimadzu HPVX2 Offline Correction\n", "\n", "Author: Egor Sobolev\n", "\n", "Offline dynamic flat-field correction for Shimadzu HPVX2 cameras" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "in_folder = \"/gpfs/exfel/exp/SPB/202121/p002919/raw/\" # input folder, required\n", "out_folder = '/gpfs/exfel/data/scratch/esobolev/test/shimadzu' # output folder, required\n", "metadata_folder = \"\" # Directory containing calibration_metadata.yml when run by xfel-calibrate\n", "run = 30 # which run to read data from, required\n", "\n", "# Data files parameters.\n", "karabo_da = ['HPVX01'] # data aggregators\n", "karabo_id = \"SPB_EHD_HPVX2_2\" # karabo prefix of Shimadzu HPV-X2 devices\n", "#receiver_id = \"PNCCD_FMT-0\" # inset for receiver devices\n", "#path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data\n", "instrument_source_template = '{}/CAM/CAMERA:daqOutput' # data source path in h5file. Template filled with karabo_id\n", "image_key = \"data.image.pixels\" # image data key in Karabo or exdf notation\n", "\n", "# Database access parameters.\n", "use_dir_creation_date = True # use dir creation date as data production reference date\n", "cal_db_interface = \"tcp://max-exfl-cal001:8021\" # calibration DB interface to use\n", "cal_db_timeout = 300000 # timeout on caldb requests\n", "db_output = False # if True, the notebook sends dark constants to the calibration database\n", "local_output = True # if True, the notebook saves dark constants locally\n", "creation_time = \"\" # To overwrite the measured creation_time. Required Format: YYYY-MM-DD HR:MN:SC.00 e.g. 2019-07-04 11:02:41.00\n", "\n", "n_components = 20 # number of principal components of flat-field to use in correction\n", "downsample_factors = [1, 1] # list of downsample factors for each image dimention (y, x)\n", "\n", "constants_folder = \"/gpfs/exfel/data/scratch/esobolev/test/shimadzu\"\n", "db_module = \"SHIMADZU_HPVX2_M001\"\n", "\n", "num_proc = 32 # number of processes running correction in parallel\n", "\n", "corrected_source_template = '{}/CORR/CAMERA:daqOutput' # data source path in h5file. 
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import warnings\n",
    "\n",
    "import h5py\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "%matplotlib inline\n",
    "\n",
    "from extra_data import RunDirectory\n",
    "\n",
    "from cal_tools.step_timing import StepTimer\n",
    "from cal_tools.files import sequence_trains, DataFile\n",
    "\n",
    "from dffc.correction import DynamicFlatFieldCorrectionCython as DynamicFlatFieldCorrection\n",
    "from dffc.offline import FlatFieldCorrectionFileProcessor\n",
    "from dffc.draw import plot_images, plot_camera_image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "instrument = karabo_id.split(\"_\")[0]\n",
    "source = instrument_source_template.format(karabo_id)\n",
    "\n",
    "print(f\"Detector in use is {karabo_id}\")\n",
    "print(f\"Instrument {instrument}\")\n",
    "\n",
    "step_timer = StepTimer()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Calibration constants"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "step_timer.start()\n",
    "\n",
    "# Offsets\n",
    "constant_name = \"Offset\"\n",
    "const_file = f\"{constants_folder}/const_{constant_name}_{db_module}.h5\"\n",
    "if not os.path.isfile(const_file):\n",
    "    raise FileNotFoundError(f\"{constant_name} constants are not found for {karabo_id}.\")\n",
    "\n",
    "with h5py.File(const_file, 'r') as f:\n",
    "    dark_conditions = dict(\n",
    "        num_frames=int(f[\"condition/Memory cells/value\"][()]),\n",
    "        nx=int(f[\"condition/Pixels X/value\"][()]),\n",
    "        ny=int(f[\"condition/Pixels Y/value\"][()]),\n",
    "        n_components=int(f[\"condition/FF components/value\"][()]),\n",
    "    )\n",
    "    dark = f[\"data\"][:]\n",
    "    dark_creation_time = f[\"creation_time\"][()].decode()\n",
    "\n",
    "print(f\"{constant_name}: {dark_creation_time}\")\n",
    "\n",
    "# Flat-field components\n",
    "constant_name = \"ComponentsFF\"\n",
    "const_file = f\"{constants_folder}/const_{constant_name}_{db_module}.h5\"\n",
    "if not os.path.isfile(const_file):\n",
    "    raise FileNotFoundError(f\"{constant_name} constants are not found for {karabo_id}.\")\n",
    "\n",
    "with h5py.File(const_file, 'r') as f:\n",
    "    flat_conditions = dict(\n",
    "        num_frames=int(f[\"condition/Memory cells/value\"][()]),\n",
    "        nx=int(f[\"condition/Pixels X/value\"][()]),\n",
    "        ny=int(f[\"condition/Pixels Y/value\"][()]),\n",
    "        n_components=int(f[\"condition/FF components/value\"][()]),\n",
    "    )\n",
    "    flat = f[\"data\"][:]\n",
    "    # Split the stored array into the flat field and its principal components.\n",
    "    components = flat[1:]\n",
    "    flat = flat[0]\n",
    "    flat_creation_time = f[\"creation_time\"][()].decode()\n",
    "\n",
    "print(f\"{constant_name}: {flat_creation_time}\")\n",
    "\n",
    "if not all(flat_conditions[key] == value for key, value in dark_conditions.items()):\n",
    "    raise ValueError(\"Conditions for offsets and flat-field components are different\")\n",
    "\n",
    "conditions = type(\"Conditions\", (), flat_conditions)\n",
    "\n",
    "print(f\"Image size: {conditions.nx} x {conditions.ny} px\")\n",
    "print(f\"Number of flat-field components: {conditions.n_components}\")\n",
    "\n",
    "if conditions.n_components < n_components:\n",
    "    warnings.warn(\n",
    "        f\"The correction is set to use {n_components} flat-field components, \"\n",
    "        f\"but the constants contain only {conditions.n_components}. \"\n",
    "        \"The setting is adjusted to the number of available components.\"\n",
    "    )\n",
    "else:\n",
    "    components = components[:n_components]\n",
    "\n",
    "step_timer.done_step(\"Load calibration constants\")"
   ]
  },
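  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The sketch below only illustrates how the loaded constants enter the correction model above: it fits the per-frame component weights by a simple least-squares projection of the dark- and flat-subtracted frame onto the components. The function `illustrate_dffc` is not part of the `dffc` package, and the actual `DynamicFlatFieldCorrection` used below may estimate the weights differently."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def illustrate_dffc(raw_frame, dark_frame, flat, components):\n",
    "    \"\"\"Illustration only: dynamic flat-field correction of a single frame.\"\"\"\n",
    "    # Work in floating point to avoid unsigned-integer underflow.\n",
    "    raw = np.asarray(raw_frame, dtype=np.float64)\n",
    "    # Components as columns of the design matrix (n_pixels, n_components).\n",
    "    A = components.reshape(len(components), -1).T\n",
    "    # Residual of the frame after static dark and flat subtraction.\n",
    "    b = (raw - dark_frame - flat).ravel()\n",
    "    # Least-squares weights of the flat-field components for this frame.\n",
    "    w, *_ = np.linalg.lstsq(A, b, rcond=None)\n",
    "    # Per-frame (dynamic) flat field and the corrected frame.\n",
    "    dynamic_flat = flat + np.tensordot(w, components, axes=1)\n",
    "    return (raw - dark_frame) / dynamic_flat"
   ]
  },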
{}, "source": [ "# Correction" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "step_timer.start()\n", "dc = RunDirectory(f\"{in_folder}/r{run:04d}\")\n", "\n", "num_trains, num_cells = dc[source][image_key].shape[:2]\n", "num_images = num_trains * num_cells\n", "print(\"Number of trains:\", num_trains)\n", "print(\"Number of images:\", num_images)\n", "\n", "dffc = DynamicFlatFieldCorrection.from_constants(\n", " dark, flat, components, downsample_factors)\n", "\n", "proc = FlatFieldCorrectionFileProcessor(dffc, num_proc, source, image_key)\n", "\n", "proc.start_workers()\n", "proc.run(dc)\n", "proc.join_workers()\n", "\n", "train_ids = proc.rdr.trains\n", "corrected_images = np.stack(proc.rdr.results, 0)\n", "step_timer.done_step(\"Correct images\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "step_timer.start()\n", "\n", "corr_source = corrected_source_template.format(karabo_id)\n", "channel = image_key.partition('.')[0]\n", "data_source_id = corr_source + '/' + channel\n", "\n", "ts = dc.train_timestamps().astype(np.uint64)\n", "ts = ts[np.in1d(dc.train_ids, train_ids)]\n", "\n", "for seq_id, train_mask in sequence_trains(train_ids):\n", " seq_train_ids = train_ids[train_mask]\n", " seq_timestamps = ts[train_mask]\n", " ntrains = len(seq_train_ids)\n", " \n", " f = DataFile.from_details(out_folder, karabo_da[0], run, seq_id)\n", " src = f.create_instrument_source(corr_source)\n", " \n", " f.create_metadata(like=dc, instrument_channels=(data_source_id,))\n", " f.create_index(seq_train_ids, timestamps=seq_timestamps)\n", " \n", " channels = {\n", " image_key.partition('.')[0]: np.ones(ntrains, int)\n", " }\n", " src.create_index(**channels)\n", " src.create_key(image_key, corrected_images[train_mask])\n", "\n", " f.close()\n", " \n", "step_timer.done_step(\"Save corrected images\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## The first raw image" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "step_timer.start()\n", "\n", "counts = dc[source][image_key].data_counts()\n", "i = np.flatnonzero(counts.values)\n", "\n", "raw_images = dc[source][image_key].select_trains(np.s_[i]).ndarray()\n", "plot_camera_image(raw_images[0, 0])\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## The first corrected image" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plot_camera_image(corrected_images[0, 0])\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## The first corrected images in the trains (up to 20)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plot_images(corrected_images[:20, 0], figsize=(13, 8))\n", "plt.show()\n", "step_timer.done_step(\"Draw examples of corrected images\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(f\"Total processing time {step_timer.timespan():.01f} s\")\n", "step_timer.print_summary()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.11" } }, "nbformat": 4, "nbformat_minor": 4 }