From 4ac94c1e1b281556f35525f03d1f869b40ddf525 Mon Sep 17 00:00:00 2001 From: karnem <mikhail.karnevskiy@desy.de> Date: Wed, 10 Jul 2019 12:21:59 +0200 Subject: [PATCH] Update report of LPD dark-characterization notebook --- cal_tools/cal_tools/tools.py | 2 +- notebooks/LPD/LPDChar_Darks_NBC.ipynb | 982 +++++++++++++++--- notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb | 419 ++++++++ 3 files changed, 1253 insertions(+), 150 deletions(-) create mode 100644 notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb diff --git a/cal_tools/cal_tools/tools.py b/cal_tools/cal_tools/tools.py index c48c8e26d..8eb72105f 100644 --- a/cal_tools/cal_tools/tools.py +++ b/cal_tools/cal_tools/tools.py @@ -150,7 +150,7 @@ def make_timing_summary(run_path, joblist): break tmpl = Template(''' - Timing summary + Runtime summary ============== .. math:: diff --git a/notebooks/LPD/LPDChar_Darks_NBC.ipynb b/notebooks/LPD/LPDChar_Darks_NBC.ipynb index f6fff191a..a364b2fe2 100644 --- a/notebooks/LPD/LPDChar_Darks_NBC.ipynb +++ b/notebooks/LPD/LPDChar_Darks_NBC.ipynb @@ -4,13 +4,47 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Offset Characterization #\n", + "# Offset, Noise and Dead Pixels Characterization #\n", "\n", "Author: S. Hauf, Version: 0.1\n", "\n", - "This notebook allows you to recharacterize dark images to get a new offset map. It will correctly handle veto settings, but note that if you veto cells you will not be able to use these offsets for runs with different veto settings - vetoed cells will have zero offset.\n", + "This notebook re-characterizes dark images to derive offset, noise and bad-pixel maps. All three types of constants are evaluated per pixel and per memory cell.\n", "\n", - "Usually you will only need to alter the cell directly below this comment." + "The notebook handles veto settings correctly, but note that if you veto cells you will not be able to use these offsets for runs with different veto settings - vetoed cells will have zero offset.\n", + "\n", + "The evaluated calibration constants are stored locally and injected into the calibration database.\n", + "\n", + "**Offset** ($O$) is defined as the median ($M$) of the dark signal ($Ds$) over trains ($t$) for a given pixel ($x,y$) and memory cell ($c$). **Noise** ($N$) is the standard deviation ($\delta$) of the dark signal.\n", + "\n", + "$$ O_{x,y,c} = M(Ds)_{t} ,\,\,\,\,\,\, N_{x,y,c} = \delta(Ds)_{t}$$\n", + "\n", + "The **bad-pixel** mask is evaluated bitwise.\n", + "\n", + "**Bit \"OFFSET_OUT_OF_THRESHOLD\":**\n", + "\n", + "The offset lies outside of the bounds\n", + "\n", + "$$M(O)_{x,y} - \delta(O)_{x,y} * thresholds\_offset\_sigma < O < M(O)_{x,y} + \delta(O)_{x,y} * thresholds\_offset\_sigma $$\n", + "\n", + "or outside of the hard limits\n", + "\n", + "$$thresholds\_offset\_hard\_low < O < thresholds\_offset\_hard\_high $$\n", + "\n", + "**Bit \"NOISE_OUT_OF_THRESHOLD\":**\n", + "\n", + "The noise lies outside of the bounds\n", + "\n", + "$$M(N)_{x,y} - \delta(N)_{x,y} * thresholds\_noise\_sigma < N < M(N)_{x,y} + \delta(N)_{x,y} * thresholds\_noise\_sigma $$\n", + "\n", + "or outside of the hard limits\n", + "\n", + "$$thresholds\_noise\_hard\_low < N < thresholds\_noise\_hard\_high $$\n", + "\n", + "**Bit \"OFFSET_NOISE_EVAL_ERROR\":**\n", + "\n", + "The offset or the noise could not be evaluated for the pixel, i.e. it is a $nan$ value.\n", + "\n", + "The values $thresholds\_offset\_sigma$, $thresholds\_offset\_hard$, $thresholds\_noise\_sigma$ and $thresholds\_noise\_hard$ are given as parameters."
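To make the bad-pixel bits described above concrete, here is a minimal, self-contained sketch of how such bitwise flags can be derived from an offset map. The flag values, array shapes and the helper name `offset_badpix_bits` are illustrative assumptions, not the notebook's actual `BadPixels` enum or code.

```python
import numpy as np

# Illustrative bit values; the real BadPixels enum may differ.
OFFSET_OUT_OF_THRESHOLD = 1 << 0
OFFSET_NOISE_EVAL_ERROR = 1 << 1

def offset_badpix_bits(offset, sigma=3.0, hard=(400, 1500)):
    """Per-pixel/per-cell bad-pixel bits for an offset map of shape (rows, cols, cells)."""
    bp = np.zeros(offset.shape, np.uint32)
    mn = np.nanmedian(offset, axis=(0, 1))   # median over pixels, per memory cell
    std = np.nanstd(offset, axis=(0, 1))     # spread over pixels, per memory cell
    # Offset outside the sigma bounds around the per-cell median
    bp[(offset < mn - sigma * std) | (offset > mn + sigma * std)] |= OFFSET_OUT_OF_THRESHOLD
    # Offset outside the hard limits
    bp[(offset < hard[0]) | (offset > hard[1])] |= OFFSET_OUT_OF_THRESHOLD
    # Offset could not be evaluated (nan/inf)
    bp[~np.isfinite(offset)] |= OFFSET_NOISE_EVAL_ERROR
    return bp

# Tiny example: 16x16 pixels, 4 memory cells of fake offsets around 1000 ADU
rng = np.random.default_rng(0)
fake_offset = rng.normal(1000, 10, size=(16, 16, 4))
fake_offset[0, 0, 0] = 2000          # deliberately out of threshold
print(np.count_nonzero(offset_badpix_bits(fake_offset)))
```

The same pattern, applied to the noise map with the noise thresholds, yields the "NOISE_OUT_OF_THRESHOLD" bit.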
] }, { @@ -22,31 +56,31 @@ "outputs": [], "source": [ "cluster_profile = \"noDB\" # The ipcluster profile to use\n", - "in_folder = \"/gpfs/exfel/exp/FXE/201701/p900020/raw/\" # path to input data, required\n", - "out_folder = \"/gpfs/exfel/exp/FXE/201830/p900020/proc/calibration/dark/\" # path to output to, required\n", - "sequences = [0] # sequence files to evaluate.\n", - "modules = [-1] # modules to evaluate, range allowed\n", + "in_folder = \"/gpfs/exfel/exp/FXE/201930/p900063/raw\" # path to input data, required\n", + "out_folder = \"/gpfs/exfel/data/scratch/karnem/LPD/\" # path to output to, required\n", + "sequences = [0] # sequence files to evaluate\n", + "modules = [7] # list of modules to evaluate, RANGE ALLOWED\n", "\n", "capacitor_setting = 5 # capacitor_setting for which data was taken, required\n", - "run_high = 603 # run number in which high gain data was recorded, required\n", - "run_med = 604 # run number in which medium gain data was recorded, required\n", - "run_low = 605 # run number in which low gain data was recorded, required\n", + "run_high = 358 # run number in which high gain data was recorded, required\n", + "run_med = 359 # run number in which medium gain data was recorded, required\n", + "run_low = 360 # run number in which low gain data was recorded, required\n", "\n", - "mem_cells = 128 # number of memory cells used\n", + "mem_cells = 512 # number of memory cells used\n", "local_output = True # output constants locally\n", "db_output = True # output constants to database\n", "bias_voltage = 250 # detector bias voltage\n", "cal_db_interface = \"tcp://max-exfl016:8017\" # the database interface to use\n", "\n", - "thresholds_offset_sigma = 3.\n", - "thresholds_offset_hard = [400, 1500]\n", - "thresholds_noise_sigma = 7.\n", - "thresholds_noise_hard = [1, 35]\n", - "skip_first_ntrains = 10\n", - "use_dir_creation_date = True # use the creation date of the directory for database time derivation\n", - "instrument = \"FXE\"\n", - "ntrains = 300\n", - "high_res_badpix_3d = False" + "thresholds_offset_sigma = 3. # bad pixel offset-sigma threshold\n", + "thresholds_offset_hard = [400, 1500] # bad pixel hard threshold\n", + "thresholds_noise_sigma = 7. 
# bad pixel noise-sigma threshold\n", + "thresholds_noise_hard = [1, 35] # bad pixel hard threshold\n", + "skip_first_ntrains = 10 # Number of first trains to skip\n", + "use_dir_creation_date = False # use the creation date of the directory for database time derivation\n", + "instrument = \"FXE\" # instrument name\n", + "ntrains = 10 # number of trains to use\n", + "high_res_badpix_3d = False # plot bad-pixel summary in high resolution" ] }, { @@ -68,21 +102,26 @@ "matplotlib.use(\"agg\")\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline\n", + "#%config InlineBackend.figure_format = 'svg'\n", + "\n", "from ipyparallel import Client\n", "\n", "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n", - "from cal_tools.tools import gain_map_files, parse_runs, run_prop_seq_from_path, get_notebook_name, get_dir_creation_date\n", + "from cal_tools.tools import (gain_map_files, parse_runs, run_prop_seq_from_path, get_notebook_name, get_dir_creation_date, get_from_db)\n", "from cal_tools.influx import InfluxLogger\n", "from cal_tools.enums import BadPixels\n", "from cal_tools.plotting import show_overview, plot_badpix_3d, create_constant_overview\n", "\n", + "from XFELDetAna import xfelpyanatools as xana\n", + "from IPython.display import display, Markdown, Latex\n", + "\n", "client = Client(profile=cluster_profile)\n", "view = client[:]\n", "view.use_dill()\n", - "\n", "gains = np.arange(3)\n", "max_cells = mem_cells\n", "cells = np.arange(max_cells)\n", + "gain_names = ['High', 'Medium', 'Low']\n", "\n", "if modules[0] == -1:\n", " modules = list(range(16))\n", @@ -98,7 +137,6 @@ " gain_runs[\"med_50pf\"] = \"r{:04d}\".format(run_med)\n", " gain_runs[\"low_50pf\"] = \"r{:04d}\".format(run_low)\n", "\n", - "\n", "capacitor_settings = [capacitor_setting]\n", "capacitor_settings = ['{}pf'.format(c) for c in capacitor_settings]\n", "\n", @@ -115,7 +153,7 @@ "logger = InfluxLogger(detector=\"LPD\", instrument=instrument, mem_cells=mem_cells,\n", " notebook=get_notebook_name(), proposal=prop)\n", " \n", - "print(\"Parameters are:\")\n", + "display(Markdown('## Evaluated parameters'))\n", "print(\"Proposal: {}\".format(prop))\n", "print(\"Memory cells: {}/{}\".format(mem_cells, max_cells))\n", "print(\"Runs: {}, {}, {}\".format(run_high, run_med, run_low))\n", @@ -126,6 +164,74 @@ "print(\"Bias voltage: {}V\".format(bias_voltage))" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.patches as patches\n", + "\n", + "qm = \"Q{}M{}\".format(modules[0]//4+1, modules[0]%4+1)\n", + "display(Markdown('## Position of the module {}, it tiles and ASICs of tile ##'.format(qm)))\n", + "\n", + "fig, ax = plt.subplots(1,figsize=(10,10))\n", + "ax.set_axis_off()\n", + "\n", + "ax.set_xlim(0, 97)\n", + "ax.set_ylim(0, 97)\n", + "\n", + "q_poses = np.array([[51,47], [47,1], [1,5], [5,51]])\n", + "m_poses= np.array([[22.5,20.5], [22.5,0.5], [0.5,0.5], [0.5,20.5]])\n", + "\n", + "for iq, q_pos in enumerate(q_poses):\n", + " ax.add_patch(patches.Rectangle(q_pos,45,45,linewidth=2,edgecolor='r',\n", + " facecolor='y', fill=True))\n", + "\n", + " ax.text(q_pos[0]+20, q_pos[1]+41.5, 'Q{}'.format(iq+1), fontsize=22)\n", + " for im, m_pos in enumerate(m_poses):\n", + " ax.add_patch(patches.Rectangle(q_pos+m_pos,22,20,linewidth=3,edgecolor='r',\n", + " facecolor='g', fill=True))\n", + "\n", + " if iq*4+im==modules[0]:\n", + " for a_posx in range(2):\n", + " for a_posy in 
range(8):\n", + " a_pos = np.array([a_posx*11., a_posy*20/8.])\n", + " pos = q_pos+m_pos+a_pos\n", + " \n", + " ax.add_patch(patches.Rectangle(q_pos+m_pos+a_pos,11,20/8.,linewidth=1,edgecolor='black',\n", + " facecolor='r', fill=True))\n", + " \n", + " if a_posx == 0:\n", + " label = str(a_posy+9)\n", + " else:\n", + " label = str(-a_posy+(a_posx*8))\n", + " \n", + " ax.text(pos[0]+4, pos[1]+0.3, label, fontsize=14)\n", + " else:\n", + " #pass\n", + " #print(q_pos[0]+4, m_pos[0]+0.3, 'Q{}M{}'.format(iq+1, im+1))\n", + " pos = q_pos+m_pos+np.array([5,8])\n", + " ax.text(pos[0], pos[1], 'Q{}M{}'.format(iq+1, im+1), fontsize=22, color='y')\n", + " \n", + "ax.add_patch(patches.Rectangle([65,93],30, 4,linewidth=1,edgecolor='black',\n", + " facecolor='r', fill=True))\n", + "\n", + "ax.text(52, 94, 'ASICs:', fontsize=22, color='black')\n", + "\n", + "for i_pos in range(8):\n", + " \n", + " pos = np.array([65,93]) + np.array([i_pos*30/8.+0.3, 0.3])\n", + " \n", + " ax.add_patch(patches.Rectangle(pos,24/8., 3.4,linewidth=1,edgecolor='black',\n", + " facecolor='deepskyblue', fill=True))\n", + " \n", + " \n", + " ax.text(pos[0]+0.5, pos[1]+0.5, '{}'.format(i_pos + 1), fontsize=18, color='black')" + ] + }, { "cell_type": "code", "execution_count": null, @@ -141,26 +247,36 @@ "gmf = gain_map_files(in_folder, gain_runs, sequences, DET_FILE_INSET, QUADRANTS, MODULES_PER_QUAD)\n", "gain_mapped_files, total_sequences, total_file_size = gmf\n", "\n", - "print(\"Will process at total of {} sequences: {:0.2f} GB of data.\".format(total_sequences, total_file_size))" + "#print(\"Will process at total of {} sequences: {:0.2f} GB of data.\".format(total_sequences, total_file_size))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data processing" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ "# the actual characterization - to not eded this without consultation\n", "import copy\n", "from functools import partial\n", + "\n", + "\n", "def characterize_module(cells, bp_thresh, skip_first_ntrains, ntrains, inp):\n", " import numpy as np\n", " import copy\n", " import h5py\n", " from cal_tools.enums import BadPixels\n", - " \n", + " import scipy.stats\n", + "\n", " def splitOffGainLPD(d):\n", " msk = np.zeros(d.shape, np.uint16)\n", " msk[...] 
= 0b0000111111111111\n", @@ -169,13 +285,15 @@ " gain = np.bitwise_and(d, msk)//4096\n", " gain[gain > 2] = 2\n", " return data, gain\n", - " \n", + "\n", " filename, filename_out, channel = inp\n", - " thresholds_offset_hard, thresholds_offset_sigma, thresholds_noise_hard, thresholds_noise_sigma = bp_thresh \n", + " thresholds_offset_hard, thresholds_offset_sigma, thresholds_noise_hard, thresholds_noise_sigma = bp_thresh\n", "\n", " infile = h5py.File(filename, \"r\", driver=\"core\")\n", - " im = np.array(infile[\"/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/data\".format(channel)][skip_first_ntrains*cells:skip_first_ntrains*cells+ntrains*cells,...])\n", - " cellid = np.squeeze(np.array(infile[\"/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(channel)][skip_first_ntrains*cells:skip_first_ntrains*cells+ntrains*cells,...]))\n", + " im = np.array(infile[\"/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/data\".format(\n", + " channel)][skip_first_ntrains*cells:skip_first_ntrains*cells+ntrains*cells, ...])\n", + " cellid = np.squeeze(np.array(infile[\"/INSTRUMENT/FXE_DET_LPD1M-1/DET/{}CH0:xtdf/image/cellId\".format(\n", + " channel)][skip_first_ntrains*cells:skip_first_ntrains*cells+ntrains*cells, ...]))\n", " infile.close()\n", "\n", " im, g = splitOffGainLPD(im[:, 0, ...])\n", @@ -183,42 +301,50 @@ "\n", " im = np.rollaxis(im, 2)\n", " im = np.rollaxis(im, 2, 1)\n", - " \n", "\n", " offset = np.zeros((im.shape[0], im.shape[1], cells))\n", " noise = np.zeros((im.shape[0], im.shape[1], cells))\n", + " normal_test = np.zeros((im.shape[0], im.shape[1], cells))\n", " for cc in range(cells):\n", " idx = cellid == cc\n", " if np.any(idx):\n", - " \n", - " offset[...,cc] = np.median(im[:,:, idx], axis=2)\n", - " noise[...,cc] = np.std(im[:,:,idx], axis=2)\n", - " \n", + "\n", + " offset[..., cc] = np.median(im[:, :, idx], axis=2)\n", + " noise[..., cc] = np.std(im[:, :, idx], axis=2)\n", + " _, normal_test[..., cc] = scipy.stats.normaltest(\n", + " im[:, :, idx], axis=2)\n", + "\n", " # bad pixels\n", " bp = np.zeros(offset.shape, np.uint32)\n", " # offset related bad pixels\n", - " offset_mn = np.nanmedian(offset, axis=(0,1))\n", - " offset_std = np.nanstd(offset, axis=(0,1)) \n", - " \n", + " offset_mn = np.nanmedian(offset, axis=(0, 1))\n", + " offset_std = np.nanstd(offset, axis=(0, 1))\n", + "\n", " bp[(offset < offset_mn-thresholds_offset_sigma*offset_std) |\n", " (offset > offset_mn+thresholds_offset_sigma*offset_std)] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n", - " bp[(offset < thresholds_offset_hard[0]) | (offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n", + " bp[(offset < thresholds_offset_hard[0]) | (\n", + " offset > thresholds_offset_hard[1])] |= BadPixels.OFFSET_OUT_OF_THRESHOLD.value\n", " bp[~np.isfinite(offset)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n", - " \n", + "\n", " # noise related bad pixels\n", - " noise_mn = np.nanmedian(noise, axis=(0,1))\n", - " noise_std = np.nanstd(noise, axis=(0,1)) \n", - " \n", + " noise_mn = np.nanmedian(noise, axis=(0, 1))\n", + " noise_std = np.nanstd(noise, axis=(0, 1))\n", + "\n", " bp[(noise < noise_mn-thresholds_noise_sigma*noise_std) |\n", " (noise > noise_mn+thresholds_noise_sigma*noise_std)] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n", - " bp[(noise < thresholds_noise_hard[0]) | (noise > thresholds_noise_hard[1])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n", + " bp[(noise < thresholds_noise_hard[0]) | (\n", + " noise > thresholds_noise_hard[1])] |= 
BadPixels.NOISE_OUT_OF_THRESHOLD.value\n", " bp[~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n", - " \n", - " return offset, noise, channel, bp\n", - " \n", + "\n", + " idx = cellid == 12\n", + " return offset, noise, channel, bp, im[12, 12, idx], normal_test\n", + "\n", + "\n", "offset_g = OrderedDict()\n", "noise_g = OrderedDict()\n", "badpix_g = OrderedDict()\n", + "data_g = OrderedDict()\n", + "ntest_g = OrderedDict()\n", "\n", "gg = 0\n", "old_cap = None\n", @@ -231,38 +357,53 @@ " offset_g[cap] = OrderedDict()\n", " noise_g[cap] = OrderedDict()\n", " badpix_g[cap] = OrderedDict()\n", - " \n", - " dones = [] \n", + " data_g[cap] = OrderedDict()\n", + " ntest_g[cap] = OrderedDict()\n", + "\n", + " dones = []\n", " inp = []\n", - " \n", + "\n", " for i in modules:\n", - " qm = \"Q{}M{}\".format(i//4 +1, i % 4 + 1) \n", + " qm = \"Q{}M{}\".format(i//4 + 1, i % 4 + 1)\n", " if qm in mapped_files and not mapped_files[qm].empty():\n", - " fname_in = mapped_files[qm].get() \n", + " fname_in = mapped_files[qm].get()\n", " dones.append(mapped_files[qm].empty())\n", - " \n", + "\n", " else:\n", " continue\n", - " fout = os.path.abspath(\"{}/{}\".format(out_folder, (os.path.split(fname_in)[-1]).replace(\"RAW\", \"CORR\")))\n", + " fout = os.path.abspath(\n", + " \"{}/{}\".format(out_folder, (os.path.split(fname_in)[-1]).replace(\"RAW\", \"CORR\")))\n", + " print(\"Process file: \", fout)\n", " inp.append((fname_in, fout, i))\n", " first = False\n", " p = partial(characterize_module, max_cells,\n", - " (thresholds_offset_hard, thresholds_offset_sigma,\n", - " thresholds_noise_hard, thresholds_noise_sigma),\n", + " (thresholds_offset_hard, thresholds_offset_sigma,\n", + " thresholds_noise_hard, thresholds_noise_sigma),\n", " skip_first_ntrains, ntrains)\n", " results = view.map_sync(p, inp)\n", " for r in results:\n", - " offset, noise, i, bp= r\n", - " qm = \"Q{}M{}\".format(i//4 +1, i % 4 + 1)\n", + " offset, noise, i, bp, data, normal = r\n", + " qm = \"Q{}M{}\".format(i//4 + 1, i % 4 + 1)\n", " if qm not in offset_g[cap]:\n", - " offset_g[cap][qm] = np.zeros((offset.shape[0], offset.shape[1], offset.shape[2], 3))\n", + " offset_g[cap][qm] = np.zeros(\n", + " (offset.shape[0], offset.shape[1], offset.shape[2], 3))\n", " noise_g[cap][qm] = np.zeros_like(offset_g[cap][qm])\n", " badpix_g[cap][qm] = np.zeros_like(offset_g[cap][qm])\n", - " offset_g[cap][qm][...,gg] = offset\n", - " noise_g[cap][qm][...,gg] = noise \n", - " badpix_g[cap][qm][...,gg] = bp \n", - " gg +=1\n", - " \n", + " data_g[cap][qm] = np.zeros((data.shape[0], 3))\n", + " ntest_g[cap][qm] = np.zeros_like(offset_g[cap][qm])\n", + "\n", + " offset_g[cap][qm][..., gg] = offset\n", + " noise_g[cap][qm][..., gg] = noise\n", + " badpix_g[cap][qm][..., gg] = bp\n", + " data_g[cap][qm][..., gg] = data\n", + " ntest_g[cap][qm][..., gg] = normal\n", + "\n", + " hn, cn = np.histogram(data, bins=20)\n", + " print(\"{} gain. Module: {}. 
Number of processed trains per cell: {}.\\n\".format(\n", + " gain_names[gg], qm, data.shape[0]))\n", + " gg += 1\n", + " plt.show()\n", + "\n", "duration = (datetime.now()-start).total_seconds()\n", "logger.runtime_summary_entry(success=True, runtime=duration,\n", " total_sequences=total_sequences,\n", @@ -274,7 +415,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ @@ -288,17 +429,57 @@ " store_file[\"{}/Offset/0/data\".format(qm)] = offset_g[cap][qm]\n", " store_file[\"{}/Noise/0/data\".format(qm)] = noise_g[cap][qm]\n", " store_file[\"{}/BadPixelsDark/0/data\".format(qm)] = badpix_g[cap][qm]\n", - " store_file.close()\n" + " store_file.close()\n", + " print('Constants are stored to {}'.format(ofile))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true + "collapsed": false + }, + "outputs": [], + "source": [ + "# Retrieve existing constants for comparison\n", + "clist = [\"Offset\", \"Noise\", \"BadPixelsDark\"]\n", + "old_const = {}\n", + "old_mdata = {}\n", + "import dateutil.parser\n", + "creation_time = dateutil.parser.parse(\"2019-02-14\")\n", + "print('Retrieve pre-existing constants for comparison.')\n", + "for cap in capacitor_settings:\n", + " for qm in offset_g[cap].keys():\n", + " for const in clist:\n", + "\n", + " condition = Conditions.Dark.LPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n", + " capacitor=cap)\n", + "\n", + " data, mdata = get_from_db(getattr(Detectors.LPD1M1, qm),\n", + " getattr(Constants.LPD, const)(),\n", + " condition,\n", + " None,\n", + " cal_db_interface, creation_time=creation_time,\n", + " verbosity=2, timeout=30000)\n", + " \n", + " old_const[const] = data\n", + " \n", + " if mdata is not None and data is not None:\n", + " time = mdata.calibration_constant_version.begin_at\n", + " old_mdata[const] = time.isoformat()\n", + " else:\n", + " old_mdata[const] = \"Not found\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false }, "outputs": [], "source": [ + "# Save constants in the calibration DB\n", "if db_output:\n", " for cap in capacitor_settings:\n", " for qm in offset_g[cap].keys():\n", @@ -311,7 +492,6 @@ " condition = Conditions.Dark.LPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n", " capacitor=cap)\n", " device = getattr(Detectors.LPD1M1, qm)\n", - " \n", " if device:\n", "\n", " metadata.detector_condition = condition\n", @@ -322,7 +502,7 @@ " else:\n", " metadata.calibration_constant_version = Versions.Timespan(device=device,\n", " start=creation_time)\n", - " metadata.send(cal_db_interface, timeout=3000000)\n", + " # metadata.send(cal_db_interface)\n", " \n", " metadata = ConstantMetaData()\n", " noise = Constants.LPD.Noise()\n", @@ -337,14 +517,14 @@ " if device:\n", "\n", " metadata.detector_condition = condition\n", - "\n", + " \n", " # specify the a version for this constant\n", " if creation_time is None:\n", " metadata.calibration_constant_version = Versions.Now(device=device)\n", " else:\n", " metadata.calibration_constant_version = Versions.Timespan(device=device,\n", " start=creation_time)\n", - " metadata.send(cal_db_interface, timeout=3000000)\n", + " # metadata.send(cal_db_interface)\n", " \n", " metadata = ConstantMetaData()\n", " badpixels = Constants.LPD.BadPixelsDark()\n", @@ -355,25 +535,21 @@ " condition = Conditions.Dark.LPD(memory_cells=max_cells, bias_voltage=bias_voltage,\n", " capacitor=cap)\n", " device = 
getattr(Detectors.LPD1M1, qm)\n", - " \n", " if device:\n", "\n", " metadata.detector_condition = condition\n", "\n", " # specify the a version for this constant\n", - " if creation_time is None:\n", - " metadata.calibration_constant_version = Versions.Now(device=device)\n", - " else:\n", - " metadata.calibration_constant_version = Versions.Timespan(device=device,\n", - " start=creation_time)\n", - " metadata.send(cal_db_interface, timeout=3000000)" + " metadata.calibration_constant_version = Versions.Now(device=device)\n", + " # metadata.send(cal_db_interface)\n", + " print(\"Injected into the calibration DB. Begin at: {}\".format(metadata.calibration_constant_version.begin_at))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ @@ -385,7 +561,7 @@ "\n", " res[cap][qm] = {'Offset': offset_g[cap][qm],\n", " 'Noise': noise_g[cap][qm],\n", - " 'BadPixels': badpix_g[cap][qm] \n", + " 'BadPixelsDark': badpix_g[cap][qm] \n", " }\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Single-Cell Overviews ##\n", "\n", - "Single cell overviews allow to identify potential effects on all memory cells, e.g. on sensor level. Additionally, they should serve as a first sanity check on expected behaviour, e.g. if structuring on the ASIC level is visible in the offsets, but otherwise no immediate artifacts are visible." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### High Gain ###" + "## Raw pedestal distribution ##\n", "\n", + "Distribution of the pedestal (in ADU) over trains for pixel (12,12), memory cell 12. The median of the distribution is shown in yellow and the standard deviation in red. The green line shows the average over all pixels for the given memory cell and gain stage."
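For illustration, a small self-contained sketch of the quantity plotted in this section: the pedestal trace of one pixel and one memory cell over trains, together with its median (the offset) and standard deviation (the noise). The data and shapes below are made up; the notebook derives these values from the raw dark runs.

```python
import numpy as np

# Fake dark data for a single memory cell: (trains, rows, columns)
rng = np.random.default_rng(1)
dark = rng.normal(1050, 6, size=(100, 256, 256))

trace = dark[:, 12, 12]        # pedestal of pixel (12, 12) over trains
offset = np.median(trace)      # per-pixel offset for this memory cell
noise = np.std(trace)          # per-pixel noise for this memory cell

hist, edges = np.histogram(trace, bins=20)
print("offset = {:.1f} ADU, noise = {:.1f} ADU".format(offset, noise))
```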
] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, - "scrolled": true + "collapsed": false }, "outputs": [], "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "#plt.rcParams.update({'font.size': 20})\n", + "fig, grid = plt.subplots(3,1,sharex=\"col\", sharey=\"row\",figsize=(10,7))\n", + "fig.subplots_adjust(wspace=0, hspace=0)\n", + "\n", "for cap in capacitor_settings:\n", - " cell = 12\n", - " gain = 0\n", - " show_overview(res[cap], cell, gain, out_folder=out_folder, infix=\"_\".join(gain_runs.values()))" + " for i in modules:\n", + " qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n", + " if data_g[cap][qm].shape[0] == 0:\n", + " break\n", + " for gain in range(3):\n", + " data = data_g[cap][qm][:,gain]\n", + " offset = np.nanmedian(data)\n", + " noise = np.nanstd(data)\n", + " xrange = [np.nanmin(data_g[cap][qm]), np.nanmax(data_g[cap][qm])]\n", + " nbins = int( xrange[1] - xrange[0])\n", + " \n", + " hn,cn = np.histogram(data, bins=nbins, range=xrange )\n", + "\n", + " grid[gain].hist(data, range=xrange, bins=nbins)\n", + " grid[gain].plot([offset-noise,offset-noise], [0,np.nanmax(hn)], linewidth=1.5, color='red', \n", + " label='1 $\\sigma$ deviation')\n", + " grid[gain].plot([offset+noise,offset+noise], [0,np.nanmax(hn)], linewidth=1.5, color='red')\n", + " grid[gain].plot([offset,offset], [0,0], linewidth=1.5, color='y', label='median')\n", + " \n", + " grid[gain].plot([np.nanmedian(offset_g[cap][qm][:,:,12,gain]),np.nanmedian(offset_g[cap][qm][:,:,12,gain])], \n", + " [0,np.nanmax(hn)], linewidth=1.5, color='green', label='average over pixels')\n", + " \n", + " grid[gain].set_xlim(xrange)\n", + " grid[gain].set_ylim(0, np.nanmax(hn)*1.1)\n", + " grid[gain].set_xlabel(\"Offset value [ADU]\")\n", + " grid[gain].set_ylabel(\"# of occurance\")\n", + " \n", + " if gain == 0:\n", + " leg = grid[gain].legend(loc='outside-top', ncol=3, bbox_to_anchor=(0.1, 0.25, 0.7, 1.0))\n", + " \n", + " \n", + " grid[gain].text(820, np.nanmax(hn)*0.4, \"{} gain\".format(gain_names[gain]), fontsize=20)\n", + " \n", + " a = plt.axes([.125, .1, 0.775, .8], frame_on=False)\n", + " a.patch.set_alpha(0.05)\n", + " a.set_xlim(xrange)\n", + " plt.plot([offset,offset], [0,1], linewidth=1.5, color='y')\n", + " #plt.title('Probability')\n", + " plt.xticks([])\n", + " plt.yticks([])\n", + " \n", + " \n", + " ypos = 0.9\n", + " x1pos = (np.nanmedian(data_g[cap][qm][:,0])+np.nanmedian(data_g[cap][qm][:,2]))/2.\n", + " x2pos = (np.nanmedian(data_g[cap][qm][:,2])+np.nanmedian(data_g[cap][qm][:,1]))/2.-10\n", + " \n", + " plt.annotate(\"\", xy=(np.nanmedian(data_g[cap][qm][:,0]), ypos), xycoords='data',\n", + " xytext=(np.nanmedian(data_g[cap][qm][:,2]), ypos), textcoords='data',\n", + " arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n", + " \n", + " plt.annotate('{}'.format(np.nanmedian(data_g[cap][qm][:,0])-np.nanmedian(data_g[cap][qm][:,2])), \n", + " xy=(x1pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n", + " \n", + " plt.annotate(\"\", xy=(np.nanmedian(data_g[cap][qm][:,2]), ypos), xycoords='data',\n", + " xytext=(np.nanmedian(data_g[cap][qm][:,1]), ypos), textcoords='data',\n", + " arrowprops=dict(arrowstyle=\"<->\", connectionstyle=\"arc3\"))\n", + " \n", + " plt.annotate('{}'.format(np.nanmedian(data_g[cap][qm][:,2])-np.nanmedian(data_g[cap][qm][:,1])), \n", + " xy=(x2pos, ypos), xycoords='data', xytext=(5, 5), textcoords='offset points')\n", + " \n", + "plt.show()\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - 
"### Medium Gain ###" + "## Normality test ##\n", + "\n", + "Distributions of raw pedestal values have been tested if they are normally distributed. A normality test have been performed for each pixel and each memory cell. Plots below show histogram of p-Values and a 2D distribution for the memory cell 12." ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, - "scrolled": true + "collapsed": false }, "outputs": [], "source": [ + "from XFELDetAna.plotting.heatmap import *\n", + "\n", + "# Loop over capacitor settings, modules, constants\n", "for cap in capacitor_settings:\n", - " cell = 12\n", - " gain = 1\n", - " show_overview(res[cap], cell, gain, out_folder=out_folder, infix=\"_\".join(gain_runs.values()))" + " for i in modules:\n", + " qm = \"Q{}M{}\".format(i//4+1, i%4+1)\n", + "\n", + " data = np.copy(ntest_g[cap][qm][:,:,:,:])\n", + " data[badpix_g[cap][qm][:,:,:,:]>0] = 1.01\n", + " \n", + " hn,cn = np.histogram(data[:,:,:,0], bins=100)\n", + " \n", + " d = [{'x': np.arange(100)*0.01+0.01,\n", + " 'y': np.histogram(data[:,:,:,0], bins=100)[0],\n", + " 'drawstyle': 'steps-pre',\n", + " 'label' : 'High gain',\n", + " },\n", + " {'x': np.arange(100)*0.01+0.01,\n", + " 'y': np.histogram(data[:,:,:,1], bins=100)[0],\n", + " 'drawstyle': 'steps-pre',\n", + " 'label' : 'Medium gain',\n", + " },\n", + " {'x': np.arange(100)*0.01+0.01,\n", + " 'y': np.histogram(data[:,:,:,2], bins=100)[0],\n", + " 'drawstyle': 'steps-pre',\n", + " 'label' : 'Low gain',\n", + " },\n", + " ]\n", + " \n", + "\n", + " fig = plt.figure(figsize=(15,15), tight_layout={'pad': 0.5, 'w_pad': 0.3})\n", + "\n", + " for gain in range(3):\n", + " ax = fig.add_subplot(221+gain)\n", + " heatmapPlot(data[:,:,12,gain], add_panels=False, cmap='viridis', figsize=(10,10),\n", + " y_label='Rows', x_label='Columns',\n", + " lut_label='p-Value',\n", + " use_axis=ax,\n", + " title='p-Value for cell 12, {} gain'.format(gain_names[gain]) )\n", + " \n", + " ax = fig.add_subplot(224)\n", + " _ = xana.simplePlot(d, #aspect=1.6, \n", + " x_label = \"p-Value\".format(gain), \n", + " y_label=\"# of occurance\",\n", + " use_axis=ax,\n", + " y_log=False, legend='outside-top-ncol3-frame', legend_pad=0.05, legend_size='5%')\n", + " ax.ticklabel_format(style='sci', axis='y', scilimits=(4,6))\n", + " \n" + ] + }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + ".. raw:: latex\n", + "\n", + " \\newpage" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Low Gain ###" + "## Single-Cell Overviews ##\n", + "\n", + "Single cell overviews allow to identify potential effects on all memory cells, e.g. on a sensor level. Additionally, they should serve as a first sanity check on expected behaviour, e.g. if structuring on the ASIC level is visible in the offsets, but otherwise no immediate artifacts are visible." 
] }, { @@ -458,26 +746,101 @@ }, "outputs": [], "source": [ + "cell = 12\n", "for cap in capacitor_settings:\n", - " cell = 12\n", - " gain = 2\n", - " show_overview(res[cap], cell, gain, out_folder=out_folder, infix=\"_\".join(gain_runs.values()))" + " for gain in range(3):\n", + " display(\n", + " Markdown('### Cell-12 overview - {} gain.'.format(gain_names[gain])))\n", + "\n", + " fig = plt.figure(figsize=(15, 12) , tight_layout={'pad': 0.1, 'w_pad': 0.1})\n", + " for qm in res[cap]:\n", + " for iconst, const in enumerate(['Offset', 'Noise', 'BadPixelsDark']):\n", + "\n", + " ax = fig.add_subplot(221+iconst)\n", + "\n", + " data = res[cap][qm][const][:, :, 12, gain]\n", + " vmax = 1.5 * np.nanmedian(res[cap][qm][const][:, :, 12, gain])\n", + " title = const\n", + " label = '{} value [ADU]'.format(const)\n", + " title = '{} value'.format(const)\n", + " if const == 'BadPixelsDark':\n", + " vmax = 4\n", + " data[data == 0] = np.nan\n", + " title = 'Bad pixel code'\n", + " label = title\n", + "\n", + " cb_labels = ['1 {}'.format(BadPixels.NOISE_OUT_OF_THRESHOLD.name),\n", + " '2 {}'.format(BadPixels.OFFSET_NOISE_EVAL_ERROR.name),\n", + " '3 {}'.format(BadPixels.OFFSET_OUT_OF_THRESHOLD.name),\n", + " '4 {}'.format('MIXED')]\n", + "\n", + " heatmapPlot(data, add_panels=False, cmap='viridis',\n", + " y_label='Rows', x_label='Columns',\n", + " lut_label='', vmax=vmax,\n", + " use_axis=ax, cb_ticklabels=cb_labels, cb_ticks = np.arange(4)+1,\n", + " title='{}'.format(title))\n", + "\n", + " else:\n", + "\n", + " heatmapPlot(data, add_panels=False, cmap='viridis',\n", + " y_label='Rows', x_label='Columns',\n", + " lut_label=label, vmax=vmax,\n", + " use_axis=ax,\n", + " title='{}'.format(title))\n", + "\n", + " #show_overview(res[cap], cell, gain, out_folder=out_folder, infix=\"_\".join(gain_runs.values()))\n", + "\n", + " fig = plt.figure(figsize=(10, 5))\n", + " for qm in res[cap]:\n", + " for iconst, const in enumerate(['Offset', 'Noise']):\n", + " data = res[cap][qm][const]\n", + " dataBP = np.copy(data)\n", + " dataBP[res[cap][qm]['BadPixelsDark'] > 0] = -1\n", + "\n", + " x_ranges = [[0, 1500], [0, 40]]\n", + " hn, cn = np.histogram(\n", + " data[:, :, :, gain], bins=100, range=x_ranges[iconst])\n", + " hnBP, cnBP = np.histogram(dataBP[:, :, :, gain], bins=cn)\n", + "\n", + " d = [{'x': cn[:-1],\n", + " 'y': hn,\n", + " 'drawstyle': 'steps-pre',\n", + " 'label': 'All data',\n", + " },\n", + " {'x': cnBP[:-1],\n", + " 'y': hnBP,\n", + " 'drawstyle': 'steps-pre',\n", + " 'label': 'Bad pixels masked',\n", + " },\n", + " ]\n", + "\n", + " ax = fig.add_subplot(121+iconst)\n", + " _ = xana.simplePlot(d, figsize=(5, 7), aspect=1,\n", + " x_label=\"{} value [ADU]\".format(const),\n", + " y_label=\"# of occurance\",\n", + " title='', legend_pad=0.1, legend_size='10%',\n", + " use_axis=ax,\n", + " y_log=True, legend='outside-top-2col-frame')\n", + "\n", + " plt.show()" ] }, { - "cell_type": "markdown", + "cell_type": "raw", "metadata": {}, "source": [ - "## Global Bad Pixel Behaviour ##\n", + ".. raw:: latex\n", "\n", - "The following plots show the results of bad pixel evaluation for all evaluated memory cells. Cells are stacked in the Z-dimension, while pixels values in x/y are rebinned with a factor of 2. This excludes single bad pixels present only in disconnected pixels. Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated. Colors encode the bad pixel type, or mixed type." 
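The x/y re-binning used for the bad-pixel overview (described in the paragraph above and in its updated wording below) can be sketched as follows. This is one possible reading, assuming a re-binned block is flagged only when every pixel in it is bad, so isolated single bad pixels drop out; the actual `plot_badpix_3d` helper may implement the re-binning differently.

```python
import numpy as np

def rebin_badpix(mask, fac=2):
    """Re-bin a 2D boolean bad-pixel mask by `fac` in x and y.

    A block is kept only if all of its pixels are flagged, which
    suppresses isolated single bad pixels.
    """
    rows, cols = mask.shape
    blocks = mask.reshape(rows // fac, fac, cols // fac, fac)
    return blocks.all(axis=(1, 3))

mask = np.zeros((8, 8), bool)
mask[0, 0] = True          # isolated bad pixel -> suppressed by the re-binning
mask[4:6, 4:6] = True      # 2x2 cluster -> survives
print(rebin_badpix(mask).astype(int))
```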
+ " \\newpage" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### High Gain ###" + "## Global Bad Pixel Behaviour ##\n", + "\n", + "The following plots shows the results of a bad pixel evaluation for all evaluated memory cells. Cells are stacked in the Z-dimension, while pixels values in x/y are re-binned with a factor of 2. This excludes single bad pixels present only in disconnected pixels. Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated. Colors encode the bad pixel type, or mixed type." ] }, { @@ -495,17 +858,33 @@ "\n", "rebin = 8 if not high_res_badpix_3d else 2\n", "\n", - "gain = 0\n", - "for cap in capacitor_settings:\n", - " for mod, data in badpix_g[cap].items():\n", - " plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)" + "for gain in range(3):\n", + " display(Markdown('### Bad pixel behaviour - {} gain. ###'.format(gain_names[gain])))\n", + " for cap in capacitor_settings:\n", + " for mod, data in badpix_g[cap].items():\n", + " plot_badpix_3d(data[...,gain], cols, title='', rebin_fac=rebin)\n", + " ax = plt.gca()\n", + " leg = ax.get_legend()\n", + " leg.set(alpha=0.5)\n", + " plt.show()" + ] + }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + ".. raw:: latex\n", + "\n", + " \\newpage" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Medium Gain ###" + "## Summary across tiles ##\n", + "\n", + "Plots give an overview of calibration constants averaged across tiles. A bad pixel mask is applied. Constants are compared with pre-existing constants retrieved from the calibration database. Differences $\\Delta$ between the old and new constants is shown." ] }, { @@ -516,17 +895,111 @@ }, "outputs": [], "source": [ - "gain = 1\n", - "for cap in capacitor_settings:\n", - " for mod, data in badpix_g[cap].items():\n", - " plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)" + "display(Markdown('Following pre-existing constants are used for comparison: \\n'))\n", + "for key in old_mdata:\n", + " display(Markdown('**{}** at {}'.format(key, old_mdata[key])))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from XFELDetAna.plotting.heatmap import *\n", + "#plt.rcParams.update({'font.size': 14})\n", + "\n", + "# Loop over capacitor settings, modules, constants\n", + "for cap in res:\n", + " for qm in res[cap]:\n", + " for gain in range(3):\n", + " display(Markdown('### Summary across tiles - {} gain.'.format(gain_names[gain])))\n", + "\n", + " for const in res[cap][qm]:\n", + " data = np.copy(res[cap][qm][const][:, :, :, gain])\n", + "\n", + " label = 'Fraction of bad pixels'\n", + "\n", + " if const != 'BadPixelsDark':\n", + " data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n", + " label = '{} value [ADU]'.format(const)\n", + " else:\n", + " data[data>0] = 1.0\n", + " \n", + " data = data.reshape(\n", + " int(data.shape[0] / 32),\n", + " 32,\n", + " int(data.shape[1] / 128),\n", + " 128,\n", + " data.shape[2])\n", + " data = np.nanmean(data, axis=(1, 3)).swapaxes(\n", + " 0, 2).reshape(512, 16)\n", + "\n", + " fig = plt.figure(figsize=(15, 6))\n", + " ax = fig.add_subplot(121)\n", + "\n", + " _ = heatmapPlot(data[:510, :], add_panels=True,\n", + " y_label='Momery Cell ID', x_label='Tile ID',\n", + " lut_label=label, use_axis=ax,\n", + " panel_y_label=label, panel_x_label=label,\n", + " cmap='viridis', # cb_loc='right',cb_aspect=15,\n", + " 
x_ticklabels=np.arange(16)+1,\n", + " x_ticks=np.arange(16)+0.5)\n", + "\n", + " if old_const[const] is not None:\n", + " ax = fig.add_subplot(122)\n", + "\n", + " dataold = np.copy(old_const[const][:, :, :, gain])\n", + " \n", + " label = '$\\Delta$ {}'.format(label)\n", + "\n", + " if const != 'BadPixelsDark':\n", + " if old_const['BadPixelsDark'] is not None:\n", + " dataold[old_const['BadPixelsDark'][:, :, :, gain] > 0] = np.nan\n", + " else:\n", + " dataold[:] = np.nan\n", + " else:\n", + " dataold[dataold>0]=1.0\n", + "\n", + " dataold = dataold.reshape(\n", + " int(dataold.shape[0] / 32),\n", + " 32,\n", + " int(dataold.shape[1] / 128),\n", + " 128,\n", + " dataold.shape[2])\n", + " dataold = np.nanmean(dataold, axis=(\n", + " 1, 3)).swapaxes(0, 2).reshape(512, 16)\n", + " dataold = dataold - data\n", + "\n", + " _ = heatmapPlot(dataold[:510, :], add_panels=True,\n", + " y_label='Momery Cell ID', x_label='Tile ID',\n", + " lut_label=label, use_axis=ax,\n", + " panel_y_label=label, panel_x_label=label,\n", + " cmap='viridis', # cb_loc='right',cb_aspect=15,\n", + " x_ticklabels=np.arange(16)+1,\n", + " x_ticks=np.arange(16)+0.5)\n", + " plt.show()\n" + ] + }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + ".. raw:: latex\n", + "\n", + " \\newpage" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Low Gain ###" + "## Variation of offset and noise across Tiles and ASICs ##\n", + "\n", + "Following plots show a standard deviation $\\sigma$ of calibration constant. Plot of standard deviation across tiles show pixels of one tile ($128 \\times 32$). Value for each pixel shows a standard deviation across 16 tiles. Standard deviation across ASICs are shown overall tiles. Plot shows pixels of one ASIC ($16 \\times 32$), where the value shows standard deviation across all ACIS of the module.\n", + "\n" ] }, { @@ -537,19 +1010,87 @@ }, "outputs": [], "source": [ - "gain = 2\n", - "for cap in capacitor_settings:\n", - " for mod, data in badpix_g[cap].items():\n", - " plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)" + "# Loop over capacitor settings, modules, constants\n", + "for cap in res:\n", + " for qm in res[cap]:\n", + " for gain in range(3):\n", + " display(Markdown('### Variation of offset and noise across ASICs - {} gain.'.format(gain_names[gain])))\n", + "\n", + " fig = plt.figure(figsize=(15, 6))\n", + " for iconst, const in enumerate(['Offset', 'Noise']):\n", + " data = np.copy(res[cap][qm][const][:, :, :, gain])\n", + " data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n", + " label = '$\\sigma$ {} [ADU]'.format(const)\n", + "\n", + " dataA = np.nanmean(data, axis=2) # average over cells\n", + " dataA = dataA.reshape(8, 32, 16, 16)\n", + " dataA = np.nanstd(dataA, axis=(0, 2)) # average across ASICs\n", + "\n", + " ax = fig.add_subplot(121+iconst)\n", + " _ = heatmapPlot(dataA, add_panels=True,\n", + " y_label='rows', x_label='columns',\n", + " lut_label=label, use_axis=ax,\n", + " panel_y_label=label, panel_x_label=label,\n", + " cmap='viridis'\n", + " )\n", + "\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Loop over capacitor settings, modules, constants\n", + "for cap in res:\n", + " for qm in res[cap]:\n", + " for gain in range(3):\n", + " display(Markdown('### Variation of offset and noise across tiles - {} gain.'.format(gain_names[gain])))\n", + "\n", + " fig = plt.figure(figsize=(15, 6))\n", + " for iconst, const in 
enumerate(['Offset', 'Noise']):\n", + " data = np.copy(res[cap][qm][const][:, :, :, gain])\n", + " data[badpix_g[cap][qm][:, :, :, gain] > 0] = np.nan\n", + " label = '$\\sigma$ {} [ADU]'.format(const)\n", + " \n", + " dataT = data.reshape(\n", + " int(data.shape[0] / 32),\n", + " 32,\n", + " int(data.shape[1] / 128),\n", + " 128,\n", + " data.shape[2])\n", + " dataT = np.nanstd(dataT, axis=(0, 2))\n", + " dataT = np.nanmean(dataT, axis=2)\n", + " \n", + " ax = fig.add_subplot(121+iconst)\n", + " _ = heatmapPlot(dataT, add_panels=True,\n", + " y_label='rows', x_label='columns',\n", + " lut_label=label, use_axis=ax,\n", + " panel_y_label=label, panel_x_label=label,\n", + " cmap='viridis')\n", + " plt.show()" + ] + }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + ".. raw:: latex\n", + "\n", + " \\newpage" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Aggregate values, and per Cell behaviour ##\n", + "## Aggregate values, and per cell behaviour ##\n", "\n", - "The following tables and plots give an overview of statistical aggregates for each constant, as well as per cell behavior." + "Following tables and plots give an overview of statistical aggregates for each constant, as well as per cell behavior, averaged across pixels." ] }, { @@ -560,50 +1101,193 @@ }, "outputs": [], "source": [ - "for cap in capacitor_settings:\n", - " create_constant_overview(offset_g[cap], \"Offset (ADU)\", mem_cells, 500, 1500,\n", - " out_folder=out_folder, infix=\"_\".join(gain_runs.values()))" + "# Loop over capacitor settings, modules, constants\n", + "for cap in res:\n", + " for qm in res[cap]:\n", + " for gain in range(3):\n", + " display(Markdown('### Mean over pixels - {} gain.'.format(gain_names[gain])))\n", + " \n", + " fig = plt.figure(figsize=(9,11))\n", + "\n", + " for iconst, const in enumerate(res[cap][qm]):\n", + "\n", + " ax = fig.add_subplot(311+iconst)\n", + " \n", + " data = res[cap][qm][const][:,:,:510,gain]\n", + " if const == 'BadPixelsDark':\n", + " data[data>0] = 1.0\n", + " \n", + " dataBP = np.copy(data)\n", + " dataBP[badpix_g[cap][qm][:,:,:510,gain]>0] = -10\n", + "\n", + " data = np.nanmean(data, axis=(0,1))\n", + " dataBP = np.nanmean(dataBP, axis=(0,1))\n", + " \n", + " d = [{'y': data,\n", + " 'x': np.arange(data.shape[0]),\n", + " 'drawstyle': 'steps-mid',\n", + " 'label' : 'All data'\n", + " }\n", + " ]\n", + " \n", + " if const != 'BadPixelsDark':\n", + " d.append({'y': dataBP,\n", + " 'x': np.arange(data.shape[0]),\n", + " 'drawstyle': 'steps-mid',\n", + " 'label' : 'good pixels only'\n", + " })\n", + " y_title = \"{} value [ADU]\".format(const)\n", + " title = \"{} value, {} gain\".format(const, gain_names[gain])\n", + " else:\n", + " y_title = \"Fraction of Bad Pixels\"\n", + " title = \"Fraction of Bad Pixels, {} gain\".format(gain_names[gain])\n", + " \n", + " data_min = np.min([data, dataBP])if const != 'BadPixelsDark' else np.min([data])\n", + " data_max = np.max([data[20:], dataBP[20:]])\n", + " data_dif = data_max - data_min\n", + " \n", + " local_max = np.max([data[200:300], dataBP[200:300]])\n", + " frac = 0.35\n", + " new_max = (local_max - data_min*(1-frac))/frac\n", + " new_max = np.max([data_max, new_max])\n", + " \n", + " _ = xana.simplePlot(d, figsize=(10,10), aspect=2, xrange=(-12, 510),\n", + " x_label = 'Memory Cell ID', \n", + " y_label=y_title, use_axis=ax,\n", + " title=title,\n", + " title_position=[0.5, 1.15], \n", + " inset='xy-coord-right', inset_x_range=(0,20), inset_indicated=True,\n", + " inset_labeled=True, 
inset_coord=[0.2,0.5,0.6,0.95],\n", + " inset_lw = 1.0, y_range = [data_min-data_dif*0.05, new_max+data_dif*0.05],\n", + " y_log=False, legend='outside-top-ncol2-frame', legend_size='18%',\n", + " legend_pad=0.00)\n", + " \n", + " #old_min, old_max = ax.get_ylim()\n", + "\n", + " \n", + " plt.tight_layout(pad=1.08, h_pad=0.35)\n", + " \n", + " plt.show()" ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "scrolled": false - }, - "outputs": [], + "cell_type": "raw", + "metadata": {}, "source": [ - "for cap in capacitor_settings:\n", - " create_constant_overview(noise_g[cap], \"Noise (ADU)\", mem_cells, 0, 25,\n", - " out_folder=out_folder, infix=\"_\".join(gain_runs.values()))" + ".. raw:: latex\n", + "\n", + " \\newpage" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary tables ##\n", + "\n", + "Following tables show summary information for evaluated module. Values for currently evaluated constants are compared with values for pre-existing constants retrieved from the calibration database." ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false, - "scrolled": false + "collapsed": false }, "outputs": [], "source": [ - "for cap in capacitor_settings:\n", - " bad_pixel_aggregate_g = OrderedDict()\n", - " for m, d in badpix_g[cap].items():\n", - " bad_pixel_aggregate_g[m] = d.astype(np.bool).astype(np.float)\n", - " create_constant_overview(bad_pixel_aggregate_g, \"Bad pixel fraction\", mem_cells, 0, 0.10, 3,\n", - " out_folder=out_folder, infix=\"_\".join(gain_runs.values()))" + "table = []\n", + "\n", + "for cap in res:\n", + " for qm in res[cap]:\n", + " for gain in range(3):\n", + " \n", + " l_data = []\n", + " l_data_old = []\n", + " \n", + " data = np.copy(res[cap][qm]['BadPixelsDark'][:,:,:,gain])\n", + " datau32 = data.astype(np.uint32)\n", + " l_data.append(data)\n", + " l_data.append(datau32 - np.bitwise_or(datau32,BadPixels.NOISE_OUT_OF_THRESHOLD.value))\n", + " l_data.append(datau32 - np.bitwise_or(datau32,BadPixels.OFFSET_OUT_OF_THRESHOLD.value))\n", + " l_data.append(datau32 - np.bitwise_or(datau32,BadPixels.OFFSET_NOISE_EVAL_ERROR.value))\n", + " \n", + " if old_const['BadPixelsDark'] is not None:\n", + " dataold = np.copy(old_const['BadPixelsDark'][:, :, :, gain])\n", + " datau32old = dataold.astype(np.uint32)\n", + " l_data_old.append(dataold)\n", + " l_data_old.append(datau32old - np.bitwise_or(datau32old,BadPixels.NOISE_OUT_OF_THRESHOLD.value))\n", + " l_data_old.append(datau32old - np.bitwise_or(datau32old,BadPixels.OFFSET_OUT_OF_THRESHOLD.value))\n", + " l_data_old.append(datau32old - np.bitwise_or(datau32old,BadPixels.OFFSET_NOISE_EVAL_ERROR.value))\n", + "\n", + " l_data_name = ['All bad pixels', 'NOISE_OUT_OF_THRESHOLD', \n", + " 'OFFSET_OUT_OF_THRESHOLD', 'OFFSET_NOISE_EVAL_ERROR']\n", + " \n", + " l_threshold = ['', '{}'.format(thresholds_noise_sigma), '{}'.format(thresholds_offset_sigma),\n", + " '{}/{}'.format(thresholds_offset_hard, thresholds_noise_hard)]\n", + " \n", + " for i in range(len(l_data)):\n", + " line = ['{}, gain {}'.format(l_data_name[i], gain_names[gain]),\n", + " l_threshold[i],\n", + " len(l_data[i][l_data[i]>0].flatten())\n", + " ]\n", + " \n", + " if old_const['BadPixelsDark'] is not None:\n", + " line += [len(l_data_old[i][l_data_old[i]>0].flatten())]\n", + " else:\n", + " line += ['-']\n", + " \n", + " table.append(line)\n", + "\n", + "import tabulate\n", + "display(Markdown('### Number of bad pixels ###'.format(qm)))\n", + "md = 
display(Latex(tabulate.tabulate(table, tablefmt='latex', \n", + " headers=[\"Pixel type\", \"Threshold\", \"New constant\", \"Old constant \"]))) " ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], - "source": [] + "source": [ + "\n", + "header = ['Parameter', \n", + " \"New constant\", \"Old constant \", \n", + " \"New constant\", \"Old constant \", \n", + " \"New constant\", \"Old constant \"]\n", + "\n", + "for const in ['Offset', 'Noise']:\n", + " table = [['','High gain', 'High gain', 'Medium gain', 'Medium gain', 'Low gain', 'Low gain']]\n", + " for cap in res:\n", + " for qm in res[cap]:\n", + "\n", + " data = np.copy(res[cap][qm][const])\n", + " data[res[cap][qm]['BadPixelsDark']>0] = np.nan\n", + " \n", + " if old_const[const] is not None and old_const['BadPixelsDark'] is not None :\n", + " dataold = np.copy(old_const[const])\n", + " dataold[old_const['BadPixelsDark']>0] = np.nan\n", + "\n", + " f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]\n", + " n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']\n", + "\n", + " for i, f in enumerate(f_list):\n", + " line = [n_list[i]]\n", + " for gain in range(3):\n", + " line.append('{:6.1f}'.format(f(data[...,gain])))\n", + " if old_const[const] is not None and old_const['BadPixelsDark'] is not None:\n", + " line.append('{:6.1f}'.format(f(dataold[...,gain])))\n", + " else:\n", + " line.append('-')\n", + "\n", + " table.append(line)\n", + "\n", + " display(Markdown('### {} [ADU]. Good pixels only. ###'.format(const)))\n", + " md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header))) " + ] } ], "metadata": { @@ -622,7 +1306,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.4.3" } }, "nbformat": 4, diff --git a/notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb b/notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb new file mode 100644 index 000000000..a0ea28996 --- /dev/null +++ b/notebooks/LPD/LPDChar_Darks_Summary_NBC.ipynb @@ -0,0 +1,419 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Summary of LPD dark characterization #\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "cluster_profile = \"noDB\" # The ipcluster profile to use\n", + "out_folder = \"/gpfs/exfel/data/scratch/karnem/LPD/\" # path to output to, required\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "from collections import OrderedDict\n", + "from datetime import datetime\n", + "import os\n", + "import h5py\n", + "import numpy as np\n", + "import glob\n", + "import matplotlib\n", + "matplotlib.use(\"agg\")\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "#%config InlineBackend.figure_format = ['svg', 'pdf']\n", + "from cal_tools.enums import BadPixels\n", + "\n", + "from XFELDetAna import xfelpyanatools as xana\n", + "from XFELDetAna.plotting.heatmap import *\n", + "\n", + "from IPython.display import display, Markdown, Latex\n", + "import tabulate\n", + "gain_names = ['High', 'Medium', 'Low']\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Load constants from local files\n", + "\n", + "files = 
glob.glob('{}/*h5'.format(out_folder))\n", + "\n", + "data = OrderedDict()\n", + "mod_names = []\n", + "# Loop over files\n", + "for filename in files:\n", + " with h5py.File(filename, 'r') as f:\n", + " # Loop over modules\n", + " for mKey in f.keys():\n", + " if mKey not in data:\n", + " mod_names.append(mKey)\n", + " data[mKey] = OrderedDict()\n", + " # Loop over constants\n", + " for cKey in f.get(mKey):\n", + " if cKey not in data[mKey]:\n", + " #print(\"/\".join((mKey, cKey, '0', 'data')))\n", + " data[mKey][cKey] = f.get(\n", + " \"/\".join((mKey, cKey, '0', 'data'))).value\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "mod_idx = np.argsort(mod_names)\n", + "\n", + "constants = {'Offset': np.zeros((len(mod_names), 256, 256,512,3)),\n", + " 'Noise': np.zeros((len(mod_names), 256, 256,512,3)),\n", + " 'BadPixelsDark': np.zeros((len(mod_names), 256, 256,512,3))}\n", + "\n", + "for i, idx in enumerate(mod_idx):\n", + " for key, item in constants.items():\n", + " item[i] = data[mod_names[idx]][key] \n", + " \n", + "mod_names = np.array(mod_names)[mod_idx]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import matplotlib.patches as patches\n", + "\n", + "display(Markdown('## Processed modules ##'))\n", + "\n", + "fig, ax = plt.subplots(1, figsize=(10, 10))\n", + "ax.set_axis_off()\n", + "\n", + "ax.set_xlim(0, 97)\n", + "ax.set_ylim(0, 97)\n", + "\n", + "q_poses = np.array([[51, 47], [47, 1], [1, 5], [5, 51]])\n", + "m_poses = np.array([[22.5, 20.5], [22.5, 0.5], [0.5, 0.5], [0.5, 20.5]])\n", + "\n", + "counter = 0\n", + "for iq, q_pos in enumerate(q_poses):\n", + " ax.add_patch(patches.Rectangle(q_pos, 45, 45, linewidth=2, edgecolor='dodgerblue',\n", + " facecolor='y', fill=True))\n", + "\n", + " ax.text(q_pos[0]+20, q_pos[1]+41.5, 'Q{}'.format(iq+1), fontsize=22)\n", + " for im, m_pos in enumerate(m_poses):\n", + " color = 'gray'\n", + " if 'Q{}M{}'.format(iq+1, im+1) in mod_names:\n", + " color = 'green'\n", + " if np.nanmean(constants['Noise'][counter, :, :, :, 0]) == 0:\n", + " color = 'red'\n", + " counter += 1\n", + " ax.add_patch(patches.Rectangle(q_pos+m_pos, 22, 20, linewidth=3, edgecolor='dodgerblue',\n", + " facecolor=color, fill=True))\n", + "\n", + " pos = q_pos+m_pos+np.array([5, 8])\n", + " ax.text(pos[0], pos[1], 'Q{}M{}'.format(\n", + " iq+1, im+1), fontsize=24, color='yellow')\n", + "\n", + "\n", + "_ = ax.legend(handles=[patches.Patch(facecolor='red', label='No data'),\n", + " patches.Patch(facecolor='gray', label='Not processed'),\n", + " patches.Patch(facecolor='green', label='Processed')],\n", + " loc='outside-top', ncol=3, bbox_to_anchor=(0.1, 0.25, 0.7, 0.8))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary figures across Modules ##\n", + "\n", + "Plots give an overview of calibration constants averaged across pixels. A bad pixel mask is applied. Constants are averaged across pixels." 
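A compact sketch of the aggregation behind these overview figures, assuming the constants are stacked as (module, row, column, memory cell, gain) as in the cell above: bad pixels are set to NaN and the remaining values are averaged over the pixel axes. The shapes and values here are illustrative.

```python
import numpy as np

# Fake stacked constants: (modules, rows, columns, memory cells, gains)
rng = np.random.default_rng(3)
offset = rng.normal(1000, 10, size=(2, 64, 64, 8, 3))
badpix = (rng.random(offset.shape) < 0.01).astype(np.uint32)

masked = offset.copy()
masked[badpix > 0] = np.nan              # exclude bad pixels from the average

# Mean over the pixel axes -> one curve per module, memory cell and gain
per_cell = np.nanmean(masked, axis=(1, 2))
print(per_cell.shape)                    # (2, 8, 3)
```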
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "q_pad = 15\n", + "m_pad = 5\n", + "\n", + "m_size = 256\n", + "q_size = m_size*2+m_pad*2\n", + "\n", + "image = np.zeros((m_size*4+q_pad+m_pad*3, m_size*4+q_pad+m_pad*3))\n", + "\n", + "q_poses = [[q_size+q_pad, q_size],\n", + " [q_size, 0],\n", + " [0, q_pad], [q_pad, q_size+q_pad]]\n", + "m_poses = [[m_size+m_pad, m_size+m_pad], [m_size+m_pad, 0], [0, 0],\n", + " [0, m_size+m_pad]]\n", + "\n", + "# Loop over capacitor settings, modules, constants\n", + "for const_name, const in constants.items():\n", + "\n", + " if const_name == 'BadPixelsDark':\n", + " continue\n", + "\n", + " display(Markdown('### {}'.format(const_name)))\n", + " for gain in range(3):\n", + "\n", + " image[:] = np.nan\n", + " counter = 0\n", + " for iq, q_pos in enumerate(q_poses):\n", + " for im, m_pos in enumerate(m_poses):\n", + " if 'Q{}M{}'.format(iq+1, im+1) in mod_names:\n", + " values = np.nanmean(const[counter, :, :, :, gain], axis=2)\n", + " values[values == 0] = np.nan\n", + " #print(iq, im, 'x ', q_pos[1]+m_pos[1], q_pos[1]+m_pos[1]+m_size)\n", + " #print(iq, im, 'y ', q_pos[0]+m_pos[0], q_pos[0]+m_pos[0] + m_size)\n", + " image[q_pos[1]+m_pos[1]: q_pos[1]+m_pos[1]+m_size,\n", + " q_pos[0]+m_pos[0]: q_pos[0]+m_pos[0] + m_size] = values\n", + " counter += 1\n", + " # break\n", + " # break\n", + "\n", + " std = np.nanstd(image)\n", + " mean = np.nanmedian(image)\n", + " if const_name == 'Noise':\n", + " std=mean/4.\n", + " _ = heatmapPlot(image, add_panels=False, figsize=(20, 20),\n", + " vmin=mean-std*2, vmax=mean+std*2,\n", + " x_label='columns', y_label='rows',\n", + " cb_label='{}, mean over memory cells [ADU]'.format(\n", + " const_name),\n", + " cmap='viridis',\n", + " title='{}. {} gain'.format(const_name, gain_names[gain]))\n", + " plt.show()\n", + " #break" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "scrolled": true + }, + "outputs": [], + "source": [ + "# plt.rcParams.update({'font.size': 14})\n", + "\n", + "# Loop over capacitor settings, modules, constants\n", + "for const_name, const in constants.items():\n", + "\n", + " display(Markdown('### Summary across Modules - {}.'.format(const_name)))\n", + " for gain in range(3):\n", + " data = np.copy(const[:, :, :, :, gain])\n", + "\n", + " if const_name != 'BadPixelsDark':\n", + " label = '{} value [ADU]. 
Good pixels only.'.format(const_name)\n", + " data[constants['BadPixelsDark'][:, :, :, :, gain] > 0] = np.nan\n", + " datamean = np.nanmean(data, axis=(1, 2))\n", + "\n", + " fig = plt.figure(figsize=(15, 6), tight_layout={\n", + " 'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})\n", + " ax = fig.add_subplot(121)\n", + " else:\n", + " label = 'Fraction of bad pixels'\n", + " data[data > 0] = 1.0\n", + " datamean = np.nanmean(data, axis=(1, 2))\n", + " datamean[datamean == 1.0] = np.nan\n", + "\n", + " fig = plt.figure(figsize=(15, 6), tight_layout={\n", + " 'pad': 0.2, 'w_pad': 1.3, 'h_pad': 1.3})\n", + " ax = fig.add_subplot(111)\n", + "\n", + " '''\n", + " _ = heatmapPlot(datamean, \n", + " add_panels=False,\n", + " y_label='Module ID', x_label='Memory Cell ID',\n", + " lut_label=label, use_axis=ax,\n", + " panel_y_label=label, panel_x_label=label,\n", + " cmap='viridis',\n", + " y_ticklabels=mod_names,\n", + " y_ticks=np.arange(len(mod_names))+0.5,\n", + " title = '{} gain'.format(gain_names[gain])\n", + " )\n", + " \n", + " if const_name != 'BadPixelsDark':\n", + " ax = fig.add_subplot(122)\n", + " label = '$\\sigma$ {} [ADU]'.format(const_name)\n", + " _ = heatmapPlot(np.nanstd(data, axis=(1, 2)), \n", + " add_panels=False,\n", + " y_label='Module ID', x_label='Memory Cell ID',\n", + " lut_label=label, use_axis=ax,\n", + " panel_y_label=label, panel_x_label=label,\n", + " cmap='viridis',\n", + " y_ticklabels=mod_names,\n", + " y_ticks=np.arange(len(mod_names))+0.5,\n", + " title = '{} gain'.format(gain_names[gain])\n", + " )\n", + " '''\n", + " d = []\n", + " for im, mod in enumerate(datamean):\n", + " d.append({'x': np.arange(mod.shape[0]),\n", + " 'y': mod,\n", + " 'drawstyle': 'steps-pre',\n", + " 'label': mod_names[im],\n", + " })\n", + "\n", + " _ = xana.simplePlot(d, figsize=(10, 10), xrange=(-12, 510),\n", + " x_label='Memory Cell ID',\n", + " y_label=label,\n", + " use_axis=ax,\n", + " title='{} gain'.format(gain_names[gain]),\n", + " title_position=[0.5, 1.18],\n", + " legend='outside-top-ncol6-frame', legend_size='18%',\n", + " legend_pad=0.00)\n", + "\n", + " if const_name != 'BadPixelsDark':\n", + " ax = fig.add_subplot(122)\n", + " label = '$\\sigma$ {} [ADU]. Good pixels only.'.format(const_name)\n", + " d = []\n", + " for im, mod in enumerate(np.nanstd(data, axis=(1, 2))):\n", + " d.append({'x': np.arange(mod.shape[0]),\n", + " 'y': mod,\n", + " 'drawstyle': 'steps-pre',\n", + " 'label': mod_names[im],\n", + " })\n", + "\n", + " _ = xana.simplePlot(d, figsize=(10, 10), xrange=(-12, 510),\n", + " x_label='Memory Cell ID',\n", + " y_label=label,\n", + " use_axis=ax,\n", + " title='{} gain'.format(gain_names[gain]),\n", + " title_position=[0.5, 1.18],\n", + " legend='outside-top-ncol6-frame', legend_size='18%',\n", + " legend_pad=0.00)\n", + "\n", + " plt.show()\n", + " # break" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary tables across Modules ##\n", + "\n", + "Tables show values averaged across all pixels and memory cells of a given detector module." 
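As an illustration of how such a summary table can be assembled with `tabulate` (which the notebook also uses for its LaTeX tables), here is a sketch with made-up numbers and hypothetical module names:

```python
import numpy as np
import tabulate

rng = np.random.default_rng(4)
gain_names = ['High', 'Medium', 'Low']
header = ['Module'] + ['{} gain'.format(g) for g in gain_names]

table = []
for mod in ['Q1M1', 'Q1M2']:             # hypothetical modules
    line = [mod]
    for gain in range(3):
        # Fake constant values per module and gain stage
        data = rng.normal(1000 - 300 * gain, 10, size=(64, 64, 8))
        line.append('{:6.1f} +- {:6.1f}'.format(np.nanmean(data), np.nanstd(data)))
    table.append(line)

print(tabulate.tabulate(table, tablefmt='latex', headers=header))
```

The notebook additionally removes tabulate's LaTeX escaping for `$` and `\` (see the cell above) so that it can emit `$\pm$` directly in the table cells.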
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "if u'$' in tabulate.LATEX_ESCAPE_RULES:\n", + " del(tabulate.LATEX_ESCAPE_RULES[u'$'])\n", + " \n", + "if u'\\\\' in tabulate.LATEX_ESCAPE_RULES:\n", + " del(tabulate.LATEX_ESCAPE_RULES[u'\\\\'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "header = ['Module', 'High gain', 'Medium gain', 'Low gain']\n", + "\n", + "for const_name, const in constants.items():\n", + " table = []\n", + "\n", + " for i_mod, mod in enumerate(mod_names):\n", + "\n", + " t_line = [mod]\n", + " for gain in range(3):\n", + "\n", + " data = np.copy(const[i_mod, :, :, :, gain])\n", + " if const_name == 'BadPixelsDark':\n", + " data[data > 0] = 1.0\n", + " datasum = np.nansum(data)\n", + " datamean = np.nanmean(data)\n", + " if datamean == 1.0:\n", + " datamean = np.nan\n", + " datasum = np.nan\n", + "\n", + " t_line.append('{:6.0f} ({:6.3f}) '.format(\n", + " datasum, datamean))\n", + " \n", + " label = '## Number (fraction) of bad pixels.'\n", + " else:\n", + "\n", + " data[constants['BadPixelsDark']\n", + " [i_mod, :, :, :, gain] > 0] = np.nan\n", + "\n", + " t_line.append('{:6.1f} $\\\\pm$ {:6.1f}'.format(\n", + " np.nanmean(data), np.nanstd(data)))\n", + " \n", + " label = '## Average {} [ADU]. Good pixels only. ##'.format(const_name)\n", + " \n", + " \n", + " table.append(t_line)\n", + "\n", + " display(Markdown(label))\n", + " md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header)))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.4.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} -- GitLab