{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# DSSC Characterize Dark Images #\n", "\n", "Author: S. Hauf, Version: 0.1\n", "\n", "The following code analyzes a set of dark images taken with the DSSC detector to deduce detector offsets and noise. Data for the detector is presented in one run and don't acquire multiple gain stages. \n", "\n", "The notebook explicitely does what pyDetLib provides in its offset calculation method for streaming data." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "cluster_profile = \"noDB\" # The ipcluster profile to use\n", "in_folder = \"/gpfs/exfel/exp/SQS/202131/p900210/raw\" # path to input data, required\n", "out_folder = \"/gpfs/exfel/data/scratch/samartse/data/DSSC\" # path to output to, required\n", "metadata_folder = \"\" # Directory containing calibration_metadata.yml when run by xfel-calibrate\n", "sequences = [0] # sequence files to evaluate.\n", "modules = [-1] # modules to run for\n", "run = 20 #run number in which data was recorded, required\n", "\n", "karabo_id = \"SQS_DET_DSSC1M-1\" # karabo karabo_id\n", "karabo_da = ['-1'] # a list of data aggregators names, Default [-1] for selecting all data aggregators\n", "receiver_id = \"{}CH0\" # inset for receiver devices\n", "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # the template to use to access data\n", "h5path = '/INSTRUMENT/{}/DET/{}:xtdf/image' # path in the HDF5 file to images\n", "h5path_idx = '/INDEX/{}/DET/{}:xtdf/image' # path in the HDF5 file to images\n", "slow_data_pattern = 'RAW-R{}-DA{}-S00000.h5'\n", "\n", "use_dir_creation_date = True # use the dir creation date for determining the creation time\n", "cal_db_interface = \"tcp://max-exfl-cal001:8020\" # the database interface to use\n", "cal_db_timeout = 3000000 # timeout on caldb requests\"\n", "local_output = True # output constants locally\n", "db_output = False # output constants to database\n", "\n", "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n", "bias_voltage = 100 # detector bias voltage\n", "rawversion = 2 # RAW file format version\n", "\n", "thresholds_offset_sigma = 3. # thresholds in terms of n sigma noise for offset deduced bad pixels\n", "thresholds_offset_hard = [4, 125] # thresholds in absolute ADU terms for offset deduced bad pixels,\n", "# minimal threshold at 4 is set at hardware level, DSSC full range 0-511\n", "\n", "thresholds_noise_sigma = 3. # thresholds in terms of n sigma noise for offset deduced bad pixels\n", "thresholds_noise_hard = [0.001, 3] # thresholds in absolute ADU terms for offset deduced bad pixels\n", "offset_numpy_algorithm = \"mean\"\n", "\n", "high_res_badpix_3d = False # set this to True if you need high-resolution 3d bad pixel plots. 
Runtime: ~ 1h\n", "slow_data_aggregators = [1,1,1,1] # quadrant/aggregator\n", "slow_data_path = 'SQS_NQS_DSSC/FPGA/PPT_Q'\n", "operation_mode = '' # Detector operation mode, optional" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "import warnings\n", "\n", "# imports and things that do not usually need to be changed\n", "from datetime import datetime\n", "\n", "warnings.filterwarnings('ignore')\n", "from collections import OrderedDict\n", "\n", "import h5py\n", "import matplotlib\n", "from ipyparallel import Client\n", "from IPython.display import Latex, Markdown, display\n", "\n", "matplotlib.use('agg')\n", "import matplotlib.pyplot as plt\n", "\n", "%matplotlib inline\n", "import numpy as np\n", "import tabulate\n", "import yaml\n", "from iCalibrationDB import Conditions, Constants, Detectors, Versions\n", "\n", "from cal_tools.dssclib import get_dssc_ctrl_data, get_pulseid_checksum\n", "from cal_tools.enums import BadPixels\n", "from cal_tools.plotting import (\n", " create_constant_overview,\n", " plot_badpix_3d,\n", " show_overview,\n", " show_processed_modules,\n", ")\n", "from cal_tools.tools import (\n", " get_dir_creation_date,\n", " get_from_db,\n", " get_notebook_name,\n", " get_pdu_from_db,\n", " get_random_db_interface,\n", " get_report,\n", " map_gain_stages,\n", " parse_runs,\n", " run_prop_seq_from_path,\n", " save_const_to_h5,\n", " send_to_db,\n", ")\n", "\n", "view = Client(profile=cluster_profile)[:]\n", "view.use_dill()\n", "\n", "# make sure a cluster is running with ipcluster start --n=32, give it a while to start\n", "\n", "h5path = h5path.format(karabo_id, receiver_id)\n", "h5path_idx = h5path_idx.format(karabo_id, receiver_id)\n", "gain_names = ['High', 'Medium', 'Low']\n", "\n", "if karabo_da[0] == '-1':\n", " if modules[0] == -1:\n", " modules = list(range(16))\n", " karabo_da = [\"DSSC{:02d}\".format(i) for i in modules]\n", "else:\n", " modules = [int(x[-2:]) for x in karabo_da]\n", " \n", "max_cells = mem_cells\n", " \n", "offset_runs = OrderedDict()\n", "offset_runs[\"high\"] = run\n", "\n", "creation_time=None\n", "if use_dir_creation_date:\n", " creation_time = get_dir_creation_date(in_folder, run)\n", " print(f\"Using {creation_time} as creation time of constant.\")\n", "\n", "run, prop, seq = run_prop_seq_from_path(in_folder)\n", "\n", "print(f\"Detector in use is {karabo_id}\") \n", "\n", "cal_db_interface = get_random_db_interface(cal_db_interface)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(\"Parameters are:\")\n", "print(f\"Proposal: {prop}\")\n", "print(f\"Memory cells: {mem_cells}/{max_cells}\")\n", "print(\"Runs: {}\".format([ v for v in offset_runs.values()]))\n", "print(f\"Sequences: {sequences}\")\n", "print(f\"Using DB: {db_output}\")\n", "print(f\"Input: {in_folder}\")\n", "print(f\"Output: {out_folder}\")\n", "print(f\"Bias voltage: {bias_voltage}V\")\n", "file_loc = f'proposal:{prop} runs:{[ v for v in offset_runs.values()][0]}'\n", "\n", "report = get_report(metadata_folder)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The following lines will create a queue of files which will the be executed module-parallel. Distinguishing between different gains." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# set everything up filewise\n", "os.makedirs(out_folder, exist_ok=True)\n", "gmf = map_gain_stages(in_folder, offset_runs, path_template, karabo_da, sequences)\n", "gain_mapped_files, total_sequences, total_file_size = gmf\n", "print(f\"Will process a total of {total_sequences} file.\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Calculate Offsets, Noise and Thresholds ##\n", "\n", "The calculation is performed per-pixel and per-memory-cell. Offsets are simply the median value for a set of dark data taken at a given gain, noise the standard deviation, and gain-bit values the medians of the gain array." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import copy\n", "from functools import partial\n", "\n", "\n", "def characterize_module(cells, bp_thresh, rawversion, karabo_id, h5path, h5path_idx, inp):\n", " import copy\n", "\n", " import h5py\n", " import numpy as np\n", " from cal_tools.enums import BadPixels\n", " from cal_tools.dssclib import get_num_cells\n", " \n", " filename, channel = inp\n", " \n", " h5path = h5path.format(channel)\n", " h5path_idx = h5path_idx.format(channel)\n", " if cells == 0:\n", " cells = get_num_cells(filename, h5path)\n", " if cells is None:\n", " raise ValueError(f\"ERROR! Empty image data file for channel {channel}\")\n", " \n", "\n", " print(f\"Using {cells} memory cells\")\n", " \n", " pulseid_checksum = None\n", "\n", " thresholds_offset_hard, thresholds_offset_sigma, thresholds_noise_hard, thresholds_noise_sigma = bp_thresh \n", "\n", " infile = h5py.File(filename, \"r\")\n", " if rawversion == 2:\n", " count = np.squeeze(infile[f\"{h5path_idx}/count\"])\n", " first = np.squeeze(infile[f\"{h5path_idx}/first\"])\n", " last_index = int(first[count != 0][-1]+count[count != 0][-1])\n", " first_index = int(first[count != 0][0])\n", " else:\n", " status = np.squeeze(infile[f\"{h5path_idx}/status\"])\n", " if np.count_nonzero(status != 0) == 0:\n", " return\n", " last = np.squeeze(infile[f\"{h5path_idx}/last\"])\n", " first = np.squeeze(infile[f\"{h5path_idx}/first\"])\n", " last_index = int(last[status != 0][-1]) + 1\n", " first_index = int(first[status != 0][0])\n", " im = np.array(infile[f\"{h5path}/data\"][first_index:last_index,...]) \n", " cellIds = np.squeeze(infile[f\"{h5path}/cellId\"][first_index:last_index,...]) \n", " infile.close()\n", " \n", " pulseid_checksum = get_pulseid_checksum(filename, h5path, h5path_idx)\n", " \n", " im = im[:, 0, ...].astype(np.float32)\n", " \n", " im = np.rollaxis(im, 2)\n", " im = np.rollaxis(im, 2, 1)\n", "\n", " mcells = cells\n", " offset = np.zeros((im.shape[0], im.shape[1], mcells), dtype = np.float64)\n", " noise = np.zeros((im.shape[0], im.shape[1], mcells), dtype = np.float64)\n", " \n", " for cc in np.unique(cellIds[cellIds < mcells]):\n", " cellidx = cellIds == cc\n", " if offset_numpy_algorithm == \"mean\":\n", " offset[...,cc] = np.mean(im[..., cellidx], axis=2)\n", " else:\n", " offset[...,cc] = np.median(im[..., cellidx], axis=2)\n", " noise[...,cc] = np.std(im[..., cellidx], axis=2)\n", " \n", " \n", " # bad pixels\n", " bp = np.zeros(offset.shape, np.uint32)\n", " # offset related bad pixels\n", " offset_mn = np.nanmedian(offset, axis=(0,1))\n", " offset_std = np.nanstd(offset, axis=(0,1)) \n", " \n", " bp[(offset < offset_mn-thresholds_offset_sigma*offset_std) |\n", " (offset > offset_mn+thresholds_offset_sigma*offset_std)] |= 
BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n", " bp[(offset < thresholds_offset_hard[0]) | (offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n", " bp[~np.isfinite(offset)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n", " \n", " # noise related bad pixels\n", " noise_mn = np.nanmedian(noise, axis=(0,1))\n", " noise_std = np.nanstd(noise, axis=(0,1)) \n", " \n", " bp[(noise < noise_mn-thresholds_noise_sigma*noise_std) |\n", " (noise > noise_mn+thresholds_noise_sigma*noise_std)] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n", " bp[(noise < thresholds_noise_hard[0]) | (noise > thresholds_noise_hard[1])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n", " bp[~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n", "\n", "\n", " return offset, noise, bp, cells, pulseid_checksum\n", " \n", " \n", "offset_g = OrderedDict()\n", "noise_g = OrderedDict()\n", "gain_g = OrderedDict()\n", "badpix_g = OrderedDict()\n", "gg = 0\n", "\n", "start = datetime.now()\n", "all_cells = []\n", "checksums = {}\n", "\n", "try:\n", " tGain, encodedGain, operatingFreq = get_dssc_ctrl_data(in_folder + \"/r{:04d}/\".format(offset_runs[\"high\"]),\n", " slow_data_pattern,\n", " slow_data_aggregators,\n", " offset_runs[\"high\"], slow_data_path)\n", "except IOError:\n", " print(\"ERROR: Couldn't access slow data to read tGain, encodedGain, and operatingFreq \\n\")\n", " \n", "for gain, mapped_files in gain_mapped_files.items():\n", " inp = []\n", " dones = []\n", " for i in modules:\n", " qm = \"Q{}M{}\".format(i//4 +1, i % 4 + 1) \n", " if qm in mapped_files and not mapped_files[qm].empty():\n", " fname_in = mapped_files[qm].get()\n", " print(\"Process file: \", fname_in)\n", " dones.append(mapped_files[qm].empty())\n", " else:\n", " continue\n", " inp.append((fname_in, i))\n", "\n", " p = partial(characterize_module, max_cells,\n", " (thresholds_offset_hard, thresholds_offset_sigma,\n", " thresholds_noise_hard, thresholds_noise_sigma), rawversion, karabo_id, h5path, h5path_idx)\n", " \n", " \n", " results = list(map(p, inp))\n", " \n", " for ii, r in enumerate(results):\n", " i = modules[ii]\n", " offset, noise, bp, thiscell, pulseid_checksum = r\n", " all_cells.append(thiscell)\n", " qm = \"Q{}M{}\".format(i//4 +1, i % 4 + 1)\n", " if qm not in offset_g:\n", " offset_g[qm] = np.zeros((offset.shape[0], offset.shape[1], offset.shape[2]))\n", " noise_g[qm] = np.zeros_like(offset_g[qm])\n", " \n", " badpix_g[qm] = np.zeros_like(offset_g[qm], np.uint32)\n", " checksums[qm] = pulseid_checksum\n", " \n", " offset_g[qm][...] = offset\n", " noise_g[qm][...] = noise\n", " badpix_g[qm][...] = bp\n", " gg +=1\n", "\n", "if len(all_cells) > 0:\n", " max_cells = np.max(all_cells)\n", " print(f\"Using {max_cells} memory cells\")\n", "else:\n", " raise ValueError(\"0 processed memory cells. 
No raw data available.\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# TODO: add db_module when received from myMDC\n", "# Create the modules dict of karabo_das and PDUs\n", "qm_dict = OrderedDict()\n", "for i, k_da in zip(modules, karabo_da):\n", " qm = f\"Q{i//4+1}M{i%4+1}\"\n", " qm_dict[qm] = {\"karabo_da\": k_da,\n", " \"db_module\": \"\"}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Retrieve existing constants for comparison\n", "clist = [\"Offset\", \"Noise\"]\n", "old_const = {}\n", "old_mdata = {}\n", "\n", "print('Retrieve pre-existing constants for comparison.')\n", "for qm in offset_g.keys():\n", " old_const[qm] = {}\n", " old_mdata[qm] = {}\n", " qm_db = qm_dict[qm]\n", " karabo_da = qm_db[\"karabo_da\"]\n", " for const in clist:\n", " \n", " dconst =getattr(Constants.DSSC, const)()\n", " condition = Conditions.Dark.DSSC(memory_cells=max_cells,\n", " bias_voltage=bias_voltage,\n", " pulseid_checksum=checksums[qm],\n", " acquisition_rate=operatingFreq[qm], \n", " target_gain=tGain[qm],\n", " encoded_gain=encodedGain[qm])\n", "\n", " # This should be used in case of running notebook \n", " # by a different method other than myMDC which already\n", " # sends CalCat info.\n", " # TODO: Set db_module to \"\" by default in the first cell\n", " if not qm_db[\"db_module\"]:\n", " qm_db[\"db_module\"] = get_pdu_from_db(karabo_id, karabo_da, dconst,\n", " condition, cal_db_interface,\n", " snapshot_at=creation_time)[0]\n", " data, mdata = get_from_db(karabo_id, karabo_da,\n", " dconst,\n", " condition,\n", " None,\n", " cal_db_interface, creation_time=creation_time,\n", " verbosity=2, timeout=cal_db_timeout)\n", "\n", " old_const[qm][const] = data\n", "\n", " if mdata is None or data is None:\n", " old_mdata[qm][const] = {\n", " \"timestamp\": \"Not found\",\n", " \"filepath\": None,\n", " \"h5path\": None\n", " }\n", " else:\n", " old_mdata[qm][const] = {\n", " \"timestamp\": mdata.calibration_constant_version.begin_at.isoformat(),\n", " \"filepath\": os.path.join(\n", " mdata.calibration_constant_version.hdf5path,\n", " mdata.calibration_constant_version.filename,\n", " ),\n", " \"h5path\": mdata.calibration_constant_version.h5path,\n", " }\n", " with open(f\"{out_folder}/module_metadata_{qm}.yml\", \"w\") as fd:\n", " yaml.safe_dump(\n", " {\"module\": qm, \"pdu\": qm_db[\"db_module\"], \"old-constants\": old_mdata[qm]},\n", " fd,\n", " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = OrderedDict()\n", "for i in modules:\n", " qm = f\"Q{i//4+1}M{i%4+1}\"\n", " try:\n", " res[qm] = {'Offset': offset_g[qm],\n", " 'Noise': noise_g[qm],\n", " }\n", " except Exception as e:\n", " print(f\"Error: No constants for {qm}: {e}\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Push the same constant two different times.\n", "# One with the generated pulseID check sum setting for the offline calibration.\n", "# And another for the online calibration as it doesn't have this pulseID checksum, yet.\n", "md = None\n", "for dont_use_pulseIds in [True, False]:\n", " for qm in res.keys():\n", " karabo_da = qm_dict[qm][\"karabo_da\"]\n", " db_module = qm_dict[qm][\"db_module\"]\n", " for const in res[qm].keys():\n", " dconst = getattr(Constants.DSSC, const)()\n", " dconst.data = res[qm][const]\n", "\n", " opfreq = None if dont_use_pulseIds else operatingFreq[qm]\n", " targetgain = None 
if dont_use_pulseIds else tGain[qm]\n", " encodedgain = None if dont_use_pulseIds else encodedGain[qm]\n", " pidsum = None if dont_use_pulseIds else checksums[qm]\n", " \n", " # set the operating condition\n", " condition = Conditions.Dark.DSSC(memory_cells=max_cells,\n", " bias_voltage=bias_voltage,\n", " pulseid_checksum=pidsum,\n", " acquisition_rate=opfreq, \n", " target_gain=targetgain,\n", " encoded_gain=encodedgain)\n", " for parm in condition.parameters:\n", " if parm.name == \"Memory cells\":\n", " parm.lower_deviation = max_cells\n", " parm.upper_deviation = 0\n", "\n", " if db_output:\n", " md = send_to_db(db_module, karabo_id, dconst, condition, file_loc, report,\n", " cal_db_interface, creation_time=creation_time, timeout=cal_db_timeout)\n", "\n", " if local_output and dont_use_pulseIds: # Don't save constant localy two times.\n", " md = save_const_to_h5(db_module, karabo_id, dconst, condition,\n", " dconst.data, file_loc, report,\n", " creation_time, out_folder)\n", " print(f\"Calibration constant {const} is stored locally.\\n\")\n", " \n", " if not dont_use_pulseIds:\n", " print(\"Constants parameter conditions are:\\n\")\n", " print(f\"• memory_cells: {max_cells}\\n• bias_voltage: {bias_voltage}\\n\"\n", " f\"• pulseid_checksum: {pidsum}\\n• acquisition_rate: {opfreq}\\n\"\n", " f\"• target_gain: {targetgain}\\n• encoded_gain: {encodedgain}\\n\"\n", " f\"• creation_time: {creation_time}\\n\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "mnames = []\n", "for i in modules:\n", " qm = f\"Q{i//4+1}M{i % 4+1}\"\n", " display(Markdown(f'## Position of the module {qm} and its ASICs##'))\n", " mnames.append(qm)\n", " \n", "show_processed_modules(karabo_id, constants=None, mnames=mnames, mode=\"position\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Single-Cell Overviews ##\n", "\n", "Single cell overviews allow to identify potential effects on all memory cells, e.g. on sensor level. Additionally, they should serve as a first sanity check on expected behaviour, e.g. if structuring on the ASIC level is visible in the offsets, but otherwise no immediate artifacts are visible." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "cell = 9\n", "gain = 0\n", "out_folder = None\n", "show_overview(res, cell, gain, out_folder=out_folder, infix=\"_{}\".format(run))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "cols = {BadPixels.NOISE_OUT_OF_THRESHOLD.value: (BadPixels.NOISE_OUT_OF_THRESHOLD.name, '#FF000080'),\n", " BadPixels.OFFSET_NOISE_EVAL_ERROR.value: (BadPixels.OFFSET_NOISE_EVAL_ERROR.name, '#0000FF80'),\n", " BadPixels.OFFSET_OUT_OF_THRESHOLD.value: (BadPixels.OFFSET_OUT_OF_THRESHOLD.name, '#00FF0080'),\n", " BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value: ('MIXED', '#DD00DD80')}\n", "\n", "if high_res_badpix_3d:\n", " display(Markdown(\"\"\"\n", " \n", " ## Global Bad Pixel Behaviour ##\n", "\n", " The following plots show the results of bad pixel evaluation for all evaluated memory cells. \n", " Cells are stacked in the Z-dimension, while pixels values in x/y are rebinned with a factor of 2. \n", " This excludes single bad pixels present only in disconnected pixels. \n", " Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated. 
\n", " Colors encode the bad pixel type, or mixed type.\n", "\n", " \"\"\"))\n", " # set rebin_fac to 1 for avoiding rebining and\n", " # losing real values of badpixels(High resolution).\n", " gain = 0\n", " for mod, data in badpix_g.items():\n", " plot_badpix_3d(data, cols, title=mod, rebin_fac=2)\n", " plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Aggregate values, and per Cell behaviour ##\n", "\n", "The following tables and plots give an overview of statistical aggregates for each constant, as well as per cell behavior." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "create_constant_overview(offset_g, \"Offset (ADU)\", max_cells, entries=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "create_constant_overview(noise_g, \"Noise (ADU)\", max_cells, 0, 100, entries=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "bad_pixel_aggregate_g = OrderedDict()\n", "for m, d in badpix_g.items():\n", " bad_pixel_aggregate_g[m] = d.astype(np.bool).astype(np.float)\n", "create_constant_overview(bad_pixel_aggregate_g, \"Bad pixel fraction\", max_cells, entries=1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Summary tables ##\n", "\n", "The following tables show summary information for the evaluated module. Values for currently evaluated constants are compared with values for pre-existing constants retrieved from the calibration database." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "time_summary = []\n", "for qm, qm_data in old_mdata.items():\n", " time_summary.append(f\"The following pre-existing constants are used for comparison for module {qm}:\")\n", " for const, const_data in qm_data.items():\n", " time_summary.append(f\"- {const} created at {const_data['timestamp']}\")\n", "display(Markdown(\"\\n\".join(time_summary)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "header = ['Parameter', \n", " \"New constant\", \"Old constant \", \n", " \"New constant\", \"Old constant \", \n", " \"New constant\", \"Old constant \"]\n", "\n", "for const in ['Offset', 'Noise']:\n", " table = [['','High gain', 'High gain']]\n", " for qm in res.keys():\n", "\n", " data = np.copy(res[qm][const])\n", "\n", " if old_const[qm][const] is not None:\n", " dataold = np.copy(old_const[qm][const])\n", "\n", " f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]\n", " n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']\n", "\n", " for i, f in enumerate(f_list):\n", " line = [n_list[i]]\n", " line.append('{:6.1f}'.format(f(data[...,gain])))\n", " if old_const[qm][const] is not None:\n", " line.append('{:6.1f}'.format(f(dataold[...,gain])))\n", " else:\n", " line.append('-')\n", "\n", " table.append(line)\n", "\n", " display(Markdown('### {} [ADU], good and bad pixels ###'.format(const)))\n", " md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header))) " ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.8" } }, "nbformat": 4, "nbformat_minor": 1 }