diff --git a/bin/activate.sh b/bin/activate.sh index 9584c3b78f61ec4a2c565e277aee4dafae1ee9de..8c8a0500e9d400a6094aaf05192ea40fbc5bde96 100644 --- a/bin/activate.sh +++ b/bin/activate.sh @@ -1,5 +1,5 @@ source /etc/profile.d/modules.sh module load anaconda/3 -module load texlive +module load texlive/2019 # export path to python environment -export PATH=/home/${USER}/.local/bin:$PATH +export PATH=$HOME/.local/bin:$PATH diff --git a/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb b/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb index eeffc84fef001676ced64ac749c5573c19174daa..2f76d945bd9d9fff6ba63efad205508433742604 100644 --- a/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb +++ b/notebooks/AGIPD/playground/AGIPD_SingleM_test_Dark.ipynb @@ -20,13 +20,13 @@ "# Inputs exposed to xfel-calibrate package should be in this first cell.\n", "\n", "# Parameters for accessing files.\n", - "in_folder = \"/gpfs/exfel/exp/SPB/202030/p900138/raw\" # path to input data, required\n", - "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/SPB/AGIPD/DARK/202030/p900138/\" # path to output to, required\n", + "in_folder = \"/gpfs/exfel/exp/SPB/202031/p900146/raw\" # path to input data, required\n", + "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/SPB2\" # path to output to, required\n", "sequences = [0] # sequence files to evaluate.\n", "\n", - "run_high = 33 # run number in which high gain data was recorded, required\n", - "run_med = 34 # run number in which medium gain data was recorded, required\n", - "run_low = 35 # run number in which low gain data was recorded, required\n", + "run_high = 67 # run number in which high gain data was recorded, required\n", + "run_med = 68 # run number in which medium gain data was recorded, required\n", + "run_low = 69 # run number in which low gain data was recorded, required\n", "\n", "local_output = True # output constants locally\n", "db_output = False # output constants to database\n", @@ -38,8 +38,12 @@ "module_name = 'AGIPD64K'\n", "\n", "channel = 16\n", + "karabo_da = [\"AGIPD16\"]\n", "\n", "path_template = \"RAW-R{:04d}-{}-S{:05d}\"\n", + "karabo_id_control = \"SPB_IRU_AGIPD1M1\" # karabo-id for control device\n", + "karabo_da_control = \"AGIPD1MCTRL01\" # karabo DA for control information\n", + "h5path_ctrl = '/CONTROL/{}/MDL/FPGA_COMP' # path to control information\n", "\n", "# Parameters for taking dark.\n", "mem_cells = 0 # number of memory cells used, set to 0 to automatically infer\n", @@ -49,11 +53,19 @@ "\n", "dont_use_dir_date = False # don't use the dir creation date for determining the creation time\n", "\n", - "thresholds_offset_sigma = 3. # thresholds in terms of n sigma noise for offset deduced bad pixels\n", - "thresholds_offset_hard = [4000, 8500] # thresholds in absolute ADU terms for offset deduced bad pixels\n", + "thresholds_offset_sigma = 3. # offset sigma thresholds for offset deduced bad pixels\n", + "thresholds_offset_hard = [0, 0] # For setting the same offset threshold for the 3 gains. Kept for backward compatibility. Default [0, 0] to take the following parameters.\n", + "thresholds_offset_hard_hg = [3000, 7000] # High-gain thresholds in absolute ADU terms for offset deduced bad pixels\n", + "thresholds_offset_hard_mg = [6000, 10000] # Medium-gain thresholds in absolute ADU terms for offset deduced bad pixels\n", + "thresholds_offset_hard_lg = [6000, 10000] # Low-gain thresholds in absolute ADU terms for offset deduced bad pixels\n", "\n", - "thresholds_noise_sigma = 5. 
# thresholds in terms of n sigma noise for offset deduced bad pixels\n", - "thresholds_noise_hard = [4, 20] # thresholds in absolute ADU terms for offset deduced bad pixels\n", + "thresholds_noise_sigma = 5. # noise sigma thresholds for noise deduced bad pixels\n", + "thresholds_noise_hard = [0, 0] # For setting the same noise threshold for the 3 gains. Kept for backward compatibility. Default [0, 0] to take the following parameters.\n", + "thresholds_noise_hard_hg = [4, 20] # High-gain thresholds in absolute ADU terms for noise deduced bad pixels\n", + "thresholds_noise_hard_mg = [4, 20] # Medium-gain thresholds in absolute ADU terms for noise deduced bad pixels\n", + "thresholds_noise_hard_lg = [4, 20] # Low-gain thresholds in absolute ADU terms for noise deduced bad pixels\n", + "\n", + "thresholds_gain_sigma = 5. # Gain separation sigma threshold\n", "\n", "# Plotting parameters\n", "high_res_badpix_3d = False # set this to True if you need high-resolution 3d bad pixel plots. Runtime: ~ 1h\n", @@ -76,11 +88,16 @@ "import os\n", "import h5py\n", "import numpy as np\n", + "import dateutil.parser\n", + "\n", "import traceback\n", "import matplotlib\n", "matplotlib.use('agg')\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline\n", + "from IPython.display import display, Markdown, Latex\n", + "import tabulate\n", + "\n", "\n", "from cal_tools.tools import (gain_map_files, parse_runs, \n", " run_prop_seq_from_path, get_notebook_name, \n", @@ -97,6 +114,7 @@ "view.use_dill()\n", "\n", "from iCalibrationDB import ConstantMetaData, Constants, Conditions, Detectors, Versions\n", + "from cal_tools.agipdlib import get_gain_setting\n", "\n", "gains = np.arange(3)\n", "\n", @@ -107,6 +125,9 @@ "offset_runs[\"med\"] = parse_runs(run_med)[0]\n", "offset_runs[\"low\"] = parse_runs(run_low)[0]\n", "\n", + "gain_names = ['High', 'Medium', 'Low']\n", + "runs = [run_high, run_med, run_low]\n", + "\n", "creation_time=None\n", "if not dont_use_dir_date:\n", " creation_time = get_dir_creation_date(in_folder, run_high)\n", @@ -116,21 +137,41 @@ "logger = InfluxLogger(detector=\"AGIPD\", instrument=instrument, mem_cells=mem_cells,\n", " notebook=get_notebook_name(), proposal=prop)\n", "\n", - "print(\"Using {} as creation time of constant.\".format(creation_time))\n", + "print(f\"Using {creation_time} as creation time of constant.\")\n", "\n", "cal_db_interface = get_random_db_interface(cal_db_interface)\n", - "print('Calibration database interface: {}'.format(cal_db_interface))\n", + "print(f'Calibration database interface: {cal_db_interface}')\n", "\n", "# Same used for testing(Temporary) the Single Module\n", "loc = \"SPB_DET_AGIPD1M-1\"\n", "\n", "# Same used for testing(Temporary) the Single Module\n", "dinstance = \"AGIPD1M1\"\n", - "print(\"Detector in use is {}\".format(loc))\n", - "\n", + "print(f\"Detector in use is {loc}\")\n", + "if \"{\" in h5path_ctrl:\n", + " h5path_ctrl = h5path_ctrl.format(karabo_id_control)\n", + "print(h5path_ctrl)\n", "# Convert gain-setting in case of still being 0.1\n", "if gain_setting == 0.1:\n", - " gain_setting = None" + " if creation_time.replace(tzinfo=None) < dateutil.parser.parse('2020-01-31'):\n", + " print(\"Set gain-setting to None for runs taken before 2020-01-31\")\n", + " gain_setting = None\n", + " else:\n", + " try:\n", + " # extract gain setting and validate that all runs have the same setting\n", + " gsettings = []\n", + " for r in runs:\n", + " control_fname = f'{in_folder}/r{r:04d}/RAW-R{r:04d}-{karabo_da_control}-S00000.h5'\n", + " 
gsettings.append(get_gain_setting(control_fname, h5path_ctrl))\n", + " if not all(g == gsettings[0] for g in gsettings):\n", + " raise ValueError(f\"Different gain settings for the 3 input runs {gsettings}\")\n", + " gain_setting = gsettings[0] \n", + " except Exception as e:\n", + " print(f'Error while reading gain setting from: \\n{control_fname}')\n", + " print(f'Error: {e}')\n", + " if \"component not found\" in str(e):\n", + " print(\"Gain setting is not found in the control information\")\n", + " gain_setting = None" ] }, { @@ -165,8 +206,7 @@ "outputs": [], "source": [ "# set everything up filewise\n", - "if not os.path.exists(out_folder):\n", - " os.makedirs(out_folder)\n", + "os.makedirs(out_folder, exist_ok=True)\n", "\n", "path_inset = \"AGIPD{}\".format(channel)\n", "raw_files = []\n", @@ -185,9 +225,9 @@ "if len(raw_files) < 1:\n", " print(\"WARNING: NO FILES TO CREATE THE DARK!\")\n", "else:\n", - " total_file_size = total_file_size / 1e9\n", + " total_file_size = total_file_size/1e9\n", " total_sequences = len(raw_files)\n", - " print(\"The total size of the processed data: {}GB\".format(total_file_size))" + " print(f\"The total size of the processed data: {total_file_size}GB\")" ] }, { @@ -207,7 +247,7 @@ "source": [ "import copy\n", "from functools import partial\n", - "def characterize_module(cells, bp_thresh, loc, acq_rate, inp):\n", + "def characterize_module(cells, bp_thresh, loc, acq_rate, channel, inp):\n", " import numpy as np\n", " import copy\n", " import h5py\n", @@ -215,7 +255,7 @@ " from cal_tools.enums import BadPixels\n", " from cal_tools.agipdlib import get_num_cells, get_acq_rate\n", " \n", - " filename, filename_out, channel = inp\n", + " filename, gg = inp\n", " \n", " if cells == 0:\n", " cells = get_num_cells(filename, loc, channel)\n", @@ -223,15 +263,20 @@ " if acq_rate == 0.:\n", " acq_rate = get_acq_rate(filename, loc, channel)\n", " \n", - " thresholds_offset_hard, thresholds_offset_sigma, thresholds_noise_hard, thresholds_noise_sigma = bp_thresh \n", + " thresholds_offset, thresholds_offset_sigma, thresholds_noise, thresholds_noise_sigma = bp_thresh \n", + " thresholds_offset_hard = thresholds_offset[gg]\n", + " thresholds_noise_hard = thresholds_noise[gg]\n", + " \n", " infile = h5py.File(filename, \"r\", driver=\"core\")\n", - " count = np.squeeze(infile[\"/INDEX/{}/DET/{}CH0:xtdf/image/count\".format(loc, channel)])\n", - " first = np.squeeze(infile[\"/INDEX/{}/DET/{}CH0:xtdf/image/first\".format(loc, channel)])\n", + " count = np.squeeze(infile[f\"/INDEX/{loc}/DET/{channel}CH0:xtdf/image/count\"])\n", + " first = np.squeeze(infile[f\"/INDEX/{loc}/DET/{channel}CH0:xtdf/image/first\"])\n", + "\n", " last_index = int(first[count != 0][-1]+count[count != 0][-1])\n", " first_index = int(first[count != 0][0])\n", - " im = np.array(infile[\"/INSTRUMENT/{}/DET/{}CH0:xtdf/image/data\".format(loc, channel)][first_index:last_index,...]) \n", - " cellIds = np.squeeze(infile[\"/INSTRUMENT/{}/DET/{}CH0:xtdf/image/cellId\".format(loc, channel)][first_index:last_index,...]) \n", - " \n", + "\n", + " im = np.array(infile[f\"/INSTRUMENT/{loc}/DET/{channel}CH0:xtdf/image/data\"][first_index:last_index,...]) \n", + " cellIds = np.squeeze(infile[f\"/INSTRUMENT/{loc}/DET/{channel}CH0:xtdf/image/cellId\"][first_index:last_index,...]) \n", + "\n", " infile.close()\n", "\n", " ga = im[:, 1, ...]\n", @@ -246,6 +291,7 @@ " mcells = cells\n", " offset = np.zeros((im.shape[0], im.shape[1], mcells))\n", " gains = np.zeros((im.shape[0], im.shape[1], mcells))\n", + " 
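# Per-cell spread of the gain-channel data; this feeds the gain-separation bad pixel cut further down (an added explanatory comment; the array itself is part of this change).\n", + " 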
gains_std = np.zeros((im.shape[0], im.shape[1], mcells))\n", " noise = np.zeros((im.shape[0], im.shape[1], mcells))\n", " \n", " for cc in np.unique(cellIds[cellIds < mcells]):\n", @@ -253,6 +299,7 @@ " offset[...,cc] = np.median(im[..., cellidx], axis=2)\n", " noise[...,cc] = np.std(im[..., cellidx], axis=2)\n", " gains[...,cc] = np.median(ga[..., cellidx], axis=2)\n", + " gains_std[...,cc] = np.std(ga[..., cellidx], axis=2)\n", " \n", " # bad pixels\n", " bp = np.zeros(offset.shape, np.uint32)\n", @@ -275,61 +322,80 @@ " bp[(noise < thresholds_noise_hard[0]) | (noise > thresholds_noise_hard[1])] |= BadPixels.NOISE_OUT_OF_THRESHOLD.value\n", " bp[~np.isfinite(noise)] |= BadPixels.OFFSET_NOISE_EVAL_ERROR.value\n", "\n", - "\n", - " return offset, noise, gains, bp, cells, acq_rate\n", - "\n", - "gg = 0\n", + " return offset, noise, gains, gains_std, gg, bp, cells, acq_rate\n", "\n", "start = datetime.now()\n", "all_cells = []\n", "all_acq_rate = []\n", "\n", - "for gain, fname_in in enumerate(raw_files):\n", + "if thresholds_offset_hard == [0, 0]:\n", + " thresholds_offset_hard = [thresholds_offset_hard_hg, thresholds_offset_hard_mg, thresholds_offset_hard_lg]\n", + "else:\n", + " thresholds_offset_hard = [thresholds_offset_hard] * 3\n", "\n", - " inp = []\n", + "if thresholds_noise_hard == [0, 0]:\n", + " thresholds_noise_hard = [thresholds_noise_hard_hg, thresholds_noise_hard_mg, thresholds_noise_hard_lg]\n", + "else:\n", + " thresholds_noise_hard = [thresholds_noise_hard] * 3\n", "\n", - " #replace RAW with CORR in .hf5 file name.\n", - " fout = os.path.abspath(\"{}/{}\".format(out_folder, (os.path.split(fname_in)[-1]).replace(\"RAW\", \"CORR\")))\n", - " inp.append((fname_in, fout, channel))\n", + "inp = []\n", "\n", - " p = partial(characterize_module, max_cells,\n", - " (thresholds_offset_hard, thresholds_offset_sigma,\n", - " thresholds_noise_hard, thresholds_noise_sigma), loc, acq_rate)\n", + "for gain, fname_in in enumerate(raw_files):\n", "\n", - " results = list(map(p, inp))\n", - " #results = view.map_sync(p, inp)\n", + " \n", + " inp.append((fname_in, gain))\n", "\n", - " for ii, r in enumerate(results):\n", - " i = 0\n", - " offset, noise, gain, bp, thiscell, thisacq = r\n", - " all_cells.append(thiscell)\n", - " all_acq_rate.append(thisacq)\n", - " \n", - " # only at the first gain.\n", - " if gg == 0:\n", - " offset_g = np.zeros((offset.shape[0], offset.shape[1], offset.shape[2], 3))\n", - " noise_g = np.zeros_like(offset_g)\n", - " gain_g = np.zeros_like(offset_g)\n", - " badpix_g = np.zeros_like(offset_g, np.uint32)\n", - " first = False\n", - "\n", - " offset_g[...,gg] = offset\n", - " noise_g[...,gg] = noise\n", - " gain_g[...,gg] = gain\n", - " badpix_g[...,gg] = bp\n", - " gg +=1\n", + " p = partial(characterize_module, max_cells,\n", + " (thresholds_offset_hard, thresholds_offset_sigma,\n", + " thresholds_noise_hard, thresholds_noise_sigma),\n", + " loc, acq_rate, channel)\n", + "\n", + "#results = list(map(p, inp))\n", + "results = view.map_sync(p, inp)\n", + "\n", + "for ii, r in enumerate(results):\n", + " offset, noise, gain, gains_std, gg, bp, thiscell, thisacq = r\n", + " all_cells.append(thiscell)\n", + " all_acq_rate.append(thisacq)\n", + "\n", + " if ii == 0:\n", + " offset_g = np.zeros((offset.shape[0], offset.shape[1], offset.shape[2], 3))\n", + " noise_g = np.zeros_like(offset_g)\n", + " gain_g = np.zeros_like(offset_g)\n", + " gainstd_g = np.zeros_like(offset_g)\n", + " badpix_g = np.zeros_like(offset_g, np.uint32)\n", + "\n", + " offset_g[...,gg] = 
offset\n", + " noise_g[...,gg] = noise\n", + " gain_g[...,gg] = gain\n", + " gainstd_g[..., gg] = gains_std\n", + " badpix_g[...,gg] = bp\n", "\n", "duration = (datetime.now()-start).total_seconds()\n", "logger.runtime_summary_entry(success=True, runtime=duration,\n", - " total_sequences=total_sequences,\n", - " filesize=total_file_size)\n", + " total_sequences=total_sequences,\n", + " filesize=total_file_size)\n", "logger.send()\n", "\n", "max_cells = np.max(all_cells)\n", - "print(\"Using {} memory cells\".format(max_cells))\n", + "print(f\"Using {max_cells} memory cells\")\n", "\n", "acq_rate = np.max(all_acq_rate)\n", - "print(\"Using {} MHz acquisition rate\".format(acq_rate))" + "print(f\"Using {acq_rate} MHz acquisition rate\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "# Add a bad pixel flag due to bad gain separation\n", + "for g in range(2):\n", + " # Gain separation between adjacent gain stages, in units of the combined standard deviation.\n", + " # Pixels with separation lower than \"thresholds_gain_sigma\" are flagged.\n", + " bad_sep = (gain_g[..., g+1] - gain_g[..., g]) / np.sqrt(gainstd_g[..., g+1]**2 + gainstd_g[..., g]**2)\n", + " badpix_g[..., g+1][bad_sep < thresholds_gain_sigma] |= BadPixels.GAIN_THRESHOLDING_ERROR.value" ] }, { @@ -362,20 +428,18 @@ "outputs": [], "source": [ "res = OrderedDict()\n", - "\n", "res = {'Offset': offset_g,\n", " 'Noise': noise_g,\n", " 'ThresholdsDark': thresholds_g,\n", - " 'BadPixelsDark': badpix_g \n", - " }\n", + " 'BadPixelsDark': badpix_g}\n", "\n", "if local_output:\n", - " ofile = \"{}/agipd_offset_store_{}_{}.h5\".format(out_folder, \"_\".join(offset_runs.values()), module_name)\n", + " ofile = f\"{out_folder}/agipd_offset_store_{'_'.join(offset_runs.values())}_{module_name}.h5\"\n", " store_file = h5py.File(ofile, \"w\")\n", - " store_file[\"{}/Offset/0/data\".format(module_name)] = offset_g\n", - " store_file[\"{}/Noise/0/data\".format(module_name)] = noise_g\n", - " store_file[\"{}/Threshold/0/data\".format(module_name)] = thresholds_g\n", - " store_file[\"{}/BadPixels/0/data\".format(module_name)] = badpix_g\n", + " store_file[f\"{module_name}/Offset/0/data\"] = offset_g\n", + " store_file[f\"{module_name}/Noise/0/data\"] = noise_g\n", + " store_file[f\"{module_name}/Threshold/0/data\"] = thresholds_g\n", + " store_file[f\"{module_name}/BadPixels/0/data\"] = badpix_g\n", " store_file.close()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n", - "file_loc = 'proposal:{} runs:{} {} {}'.format(proposal, run_low, run_med, run_high)" + "file_loc = f\"proposal:{proposal} runs:{run_low} {run_med} {run_high}\"" ] }, { @@ -406,7 +470,6 @@ " bias_voltage=bias_voltage,\n", " acquisition_rate=acq_rate,\n", " gain_setting=gain_setting)\n", - " detinst = getattr(Detectors, dinstance)\n", "\n", " # AGIPD_SIV1_AGIPDV11_M001Test\n", " device = Detectors.AGIPD.AGIPD_SIV1_AGIPDV11_M001Test\n", @@ -459,12 +522,11 @@ "source": [ "cell = 3\n", "gain = 0\n", - "out_folder = None\n", - "# attach module name for plotting.\n", + "\n", "res_da = {}\n", "res_da[module_name] = res\n", "\n", - "show_overview(res_da, cell, gain, out_folder=out_folder, infix=\"_\".join(offset_runs.values()))" + "show_overview(res_da, cell, gain, out_folder=None, infix=\"_\".join(offset_runs.values()))" ] }, { @@ -482,7 +544,7 @@ "source": [ "cell = 3\n", "gain = 1\n", - "show_overview(res_da, cell, gain, out_folder=out_folder, infix=\"_\".join(offset_runs.values()))" 
+ "show_overview(res_da, cell, gain, out_folder=None, infix=\"_\".join(offset_runs.values()))" ] }, { @@ -500,7 +562,7 @@ "source": [ "cell = 3\n", "gain = 2\n", - "show_overview(res_da, cell, gain, out_folder=out_folder, infix=\"_\".join(offset_runs.values()))" + "show_overview(res_da, cell, gain, out_folder=None, infix=\"_\".join(offset_runs.values()))" ] }, { @@ -512,13 +574,6 @@ "The following plots show the results of bad pixel evaluation for all evaluated memory cells. Cells are stacked in the Z-dimension, while pixels values in x/y are rebinned with a factor of 2. This excludes single bad pixels present only in disconnected pixels. Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated. Colors encode the bad pixel type, or mixed type." ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### High Gain ###" - ] - }, { "cell_type": "code", "execution_count": null, @@ -528,22 +583,39 @@ "cols = {BadPixels.NOISE_OUT_OF_THRESHOLD.value: (BadPixels.NOISE_OUT_OF_THRESHOLD.name, '#FF000080'),\n", " BadPixels.OFFSET_NOISE_EVAL_ERROR.value: (BadPixels.OFFSET_NOISE_EVAL_ERROR.name, '#0000FF80'),\n", " BadPixels.OFFSET_OUT_OF_THRESHOLD.value: (BadPixels.OFFSET_OUT_OF_THRESHOLD.name, '#00FF0080'),\n", - " BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value: ('MIXED', '#DD00DD80')}\n", + " BadPixels.GAIN_THRESHOLDING_ERROR.value: (BadPixels.GAIN_THRESHOLDING_ERROR.name, '#FF40FF40'),\n", + " BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value: ('OFFSET_OUT_OF_THRESHOLD + NOISE_OUT_OF_THRESHOLD', '#DD00DD80'),\n", + " BadPixels.OFFSET_OUT_OF_THRESHOLD.value | BadPixels.NOISE_OUT_OF_THRESHOLD.value | \n", + " BadPixels.GAIN_THRESHOLDING_ERROR.value: ('MIXED', '#BFDF009F')}\n", "\n", - "rebin = 8 if not high_res_badpix_3d else 2\n", + "if high_res_badpix_3d:\n", + " display(Markdown(\"\"\"\n", + " \n", + " ## Global Bad Pixel Behaviour ##\n", "\n", - "gain = 0\n", - "badpix_g_da = {}\n", - "badpix_g_da[module_name] = badpix_g\n", - "for mod, data in badpix_g_da.items():\n", - " plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)" + " The following plots show the results of bad pixel evaluation for all evaluated memory cells. \n", + " Cells are stacked in the Z-dimension, while pixel values in x/y are rebinned with a factor of 2. \n", + " This excludes single bad pixels present only in disconnected pixels. \n", + " Hence, any bad pixels spanning at least 4 pixels in the x/y-plane, or across at least two memory cells are indicated. \n", + " Colors encode the bad pixel type, or mixed type.\n", + "\n", + " \"\"\"))\n", + "\n", + " gnames = ['High Gain', 'Medium Gain', 'Low Gain']\n", + " for gain in range(3):\n", + " display(Markdown(f'### {gnames[gain]} ###'))\n", + " for mod, data in {module_name: badpix_g}.items():\n", + " plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=1)\n", + " plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Medium Gain ###" + "## Aggregate values, and per Cell behaviour ##\n", + "\n", + "The following tables and plots give an overview of statistical aggregates for each constant, as well as per cell behavior." 
] }, { @@ -552,16 +624,12 @@ "metadata": {}, "outputs": [], "source": [ - "gain = 1\n", - "for mod, data in badpix_g_da.items():\n", - " plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Low Gain ###" + "offset_g_dict = {}\n", + "badpix_g_dict = {}\n", + "offset_g_dict[module_name] = offset_g\n", + "badpix_g_dict[module_name] = badpix_g\n", + "create_constant_overview(offset_g_dict, \"Offset (ADU)\", max_cells, 4000, 8000,\n", + " badpixels=[badpix_g_dict, np.nan])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "gain = 2\n", - "for mod, data in badpix_g_da.items():\n", - " plot_badpix_3d(data[...,gain], cols, title=mod, rebin_fac=rebin)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Aggregate values, and per Cell behaviour ##\n", + "noise_g_dict = {}\n", + "noise_g_dict[module_name] = noise_g\n", "\n", - "The following tables and plots give an overview of statistical aggregates for each constant, as well as per cell behavior." + "create_constant_overview(noise_g_dict, \"Noise (ADU)\", max_cells, 0, 100,\n", + " badpixels=[badpix_g_dict, np.nan])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "offset_g_da = {}\n", - "offset_g_da[module_name] = offset_g\n", + "thresholds_g_dict = {}\n", + "thresholds_g_dict[module_name] = thresholds_g\n", "\n", - "create_constant_overview(offset_g_da, \"Offset (ADU)\", max_cells, 4000, 8000,\n", - " out_folder=out_folder, infix=\"_\".join(offset_runs.values()))" + "# Plot only three gain threshold maps.\n", + "bp_thresh = OrderedDict()\n", + "for mod, con in badpix_g_dict.items():\n", + " bp_thresh[mod] = np.zeros((con.shape[0], con.shape[1], con.shape[2], 5), dtype=con.dtype)\n", + " bp_thresh[mod][...,:2] = con[...,:2]\n", + " bp_thresh[mod][...,2:] = con\n", + "\n", + "\n", + "create_constant_overview(thresholds_g_dict, \"Threshold (ADU)\", max_cells, 3000, 10000, 5,\n", + " badpixels=[bp_thresh, np.nan],\n", + " gmap=['HG-MG Threshold', 'MG-LG Threshold', 'High gain', 'Medium gain', 'Low gain'],\n", + " marker=['d','d','','','']\n", + " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "noise_g_da = {}\n", - "noise_g_da[module_name] = noise_g\n", - "create_constant_overview(noise_g_da, \"Noise (ADU)\", max_cells, 0, 100,\n", - " out_folder=out_folder, infix=\"_\".join(offset_runs.values()))" + "bad_pixel_aggregate_g = OrderedDict()\n", + "\n", + "for m, d in badpix_g_dict.items():\n", + " bad_pixel_aggregate_g[m] = d.astype(np.bool).astype(np.float)\n", + "create_constant_overview(bad_pixel_aggregate_g, \"Bad pixel fraction\", max_cells, 0, 0.10, 3)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "thresholds_g_da = {}\n", - "thresholds_g_da[module_name] = thresholds_g\n", - "create_constant_overview(thresholds_g_da, \"Threshold (ADU)\", max_cells, 3000, 8000, 2,\n", - " out_folder=out_folder, infix=\"_\".join(offset_runs.values()))" + "# Retrieve existing constants for comparison\n", + "clist = [\"Offset\", \"Noise\", \"ThresholdsDark\", \"BadPixelsDark\"]\n", + "old_const = {}\n", + "old_mdata = {}\n", + "\n", + "print('Retrieve pre-existing constants for comparison.')\n", + "\n", + "for const in res:\n", + " metadata = ConstantMetaData()\n", + " dconst = getattr(Constants.AGIPD, const)()\n", + " dconst.data = res[const]\n", + " metadata.calibration_constant = dconst\n", + "\n", + " # Setting conditions\n", + " 
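# The dark conditions below must match those used for the new constant, so that the database returns a comparable version (added explanatory comment).\n", + " 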
condition = Conditions.Dark.AGIPD(memory_cells=max_cells,\n", + " bias_voltage=bias_voltage,\n", + " acquisition_rate=acq_rate,\n", + " gain_setting=gain_setting)\n", + "\n", + " metadata.detector_condition = condition\n", + "\n", + " # specify a version for this constant\n", + " if creation_time is None:\n", + " metadata.calibration_constant_version = Versions.Now(device=device)\n", + " else:\n", + " metadata.calibration_constant_version = Versions.Timespan(device=device,\n", + " start=creation_time)\n", + "\n", + " metadata.retrieve(cal_db_interface, timeout=cal_db_timeout)\n", + "\n", + " old_const[const] = metadata.calibration_constant.data\n", + "\n", + " if metadata.comm_db_success:\n", + " time = metadata.calibration_constant_version.begin_at\n", + " old_mdata[const] = time.isoformat()\n", + " os.makedirs(os.path.join(f'{out_folder}','old/'), exist_ok=True)\n", + " save_const_to_h5(metadata, os.path.join(f'{out_folder}','old/'))\n", + " else:\n", + " old_mdata[const] = \"Not found\"\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Summary tables ##\n", "\n", "The following tables show summary information for the evaluated module. Values for currently evaluated constants are compared with values for pre-existing constants retrieved from the calibration database." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "table = []\n", "gain_names = ['High', 'Medium', 'Low']\n", "bits = [BadPixels.NOISE_OUT_OF_THRESHOLD, BadPixels.OFFSET_OUT_OF_THRESHOLD, BadPixels.OFFSET_NOISE_EVAL_ERROR, BadPixels.GAIN_THRESHOLDING_ERROR]\n", "for gain in range(3):\n", "\n", " l_data = []\n", " l_data_old = []\n", "\n", " data = np.copy(badpix_g[:,:,:,gain])\n", " datau32 = data.astype(np.uint32)\n", " l_data.append(len(datau32[datau32>0].flatten()))\n", " for bit in bits:\n", " l_data.append(np.count_nonzero(badpix_g[:,:,:,gain] & bit.value))\n", "\n", " if old_const['BadPixelsDark'] is not None:\n", " dataold = np.copy(old_const['BadPixelsDark'][:, :, :, gain])\n", " datau32old = dataold.astype(np.uint32)\n", " l_data_old.append(len(datau32old[datau32old>0].flatten()))\n", " for bit in bits:\n", " l_data_old.append(np.count_nonzero(old_const['BadPixelsDark'][:, :, :, gain] & bit.value))\n", "\n", " l_data_name = ['All bad pixels', 'NOISE_OUT_OF_THRESHOLD',\n", " 'OFFSET_OUT_OF_THRESHOLD', 'OFFSET_NOISE_EVAL_ERROR', 'GAIN_THRESHOLDING_ERROR']\n", "\n", " l_threshold = ['', f'{thresholds_noise_sigma} {thresholds_noise_hard[gain]}',\n", " f'{thresholds_offset_sigma} {thresholds_offset_hard[gain]}',\n", " '', f'{thresholds_gain_sigma}']\n", "\n", " for i in range(len(l_data)):\n", " line = [f'{l_data_name[i]}, {gain_names[gain]} gain', l_threshold[i], l_data[i]]\n", "\n", " if old_const['BadPixelsDark'] is not None:\n", " line += [l_data_old[i]]\n", " else:\n", " line += ['-']\n", "\n", " table.append(line)\n", " table.append(['', '', '', ''])\n", "\n", "display(Markdown('''\n", "\n", "### Number of bad 
pixels ###\n", "\n", "One pixel can be bad for different reasons; therefore, the sum of all types of bad pixels can be more than the number of all bad pixels.\n", "\n", "'''))\n", "if len(table)>0:\n", " md = display(Latex(tabulate.tabulate(table, tablefmt='latex',\n", " headers=[\"Pixel type\", \"Threshold\",\n", " \"New constant\", \"Old constant\"])))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import tabulate\n", "\n", "header = ['Parameter', \n", " \"New constant\", \"Old constant \", \n", " \"New constant\", \"Old constant \", \n", " \"New constant\", \"Old constant \",\n", " \"New constant\", \"Old constant \"]\n", "\n", "for const in ['Offset', 'Noise', 'ThresholdsDark']:\n", " if const != 'ThresholdsDark':\n", " table = [['','High gain', 'High gain', 'Medium gain', 'Medium gain', 'Low gain', 'Low gain']]\n", " else:\n", " table = [['','HG-MG threshold', 'HG-MG threshold', 'MG-LG threshold', 'MG-LG threshold']]\n", "\n", " data = np.copy(res[const])\n", " if const == 'ThresholdsDark':\n", " data[...,0][res['BadPixelsDark'][...,0]>0] = np.nan\n", " data[...,1][res['BadPixelsDark'][...,1]>0] = np.nan\n", " else:\n", " data[res['BadPixelsDark']>0] = np.nan\n", "\n", " if old_const[const] is not None and old_const['BadPixelsDark'] is not None:\n", " dataold = np.copy(old_const[const])\n", " if const == 'ThresholdsDark':\n", " dataold[...,0][old_const['BadPixelsDark'][...,0]>0] = np.nan\n", " dataold[...,1][old_const['BadPixelsDark'][...,1]>0] = np.nan\n", " else:\n", " dataold[old_const['BadPixelsDark']>0] = np.nan\n", "\n", " f_list = [np.nanmedian, np.nanmean, np.nanstd, np.nanmin, np.nanmax]\n", " n_list = ['Median', 'Mean', 'Std', 'Min', 'Max']\n", "\n", " for i, f in enumerate(f_list):\n", " line = [n_list[i]]\n", " for gain in range(3):\n", " # Compare only the two gain threshold maps\n", " if gain == 2 and const == 'ThresholdsDark':\n", " continue\n", " line.append('{:6.1f}'.format(f(data[...,gain])))\n", " if old_const[const] is not None and old_const['BadPixelsDark'] is not None:\n", " line.append('{:6.1f}'.format(f(dataold[...,gain])))\n", " else:\n", " line.append('-')\n", "\n", " table.append(line)\n", "\n", " display(Markdown('### {} [ADU], good pixels only ###'.format(const)))\n", " md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=header))) " ] } ], "metadata": { "kernelspec": { - "display_name": "Calibration_VENV", + "display_name": "Python 3", "language": "python", - "name": "calibration_venv" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb index 10da19ed8966e13ca35b95e3746f1fe75413ed15..bb0eda13f888fa23fa19cd98131ec3ba041632f5 100644 --- a/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb +++ b/notebooks/pnCCD/Characterize_pnCCD_Dark_NBC.ipynb @@ -31,16 +31,19 @@ "outputs": [], "source": [ "cluster_profile = \"noDB\" # ipcluster profile to use\n", - "in_folder = \"/gpfs/exfel/exp/SQS/201930/p900075/raw\" # input folder, required\n", + "in_folder = \"/gpfs/exfel/exp/SQS/202031/p900166/raw\" # input folder, required\n", "out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/pnccd' # output folder, required\n", "sequence = 0 # sequence file to use\n", - "run = 364 # which run to read data from, required\n", + "run = 34 # which run to read data from, 
required\n", "\n", + "db_module = \"PnCCD1\"\n", "karabo_da = ['PNCCD01'] # data aggregators\n", + "karabo_da_control = \"PNCCD02\" # file inset for control data\n", "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n", "receiver_id = \"PNCCD_FMT-0\" # inset for receiver devices\n", "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data\n", "h5path = '/INSTRUMENT/{}/CAL/{}:output/data/image/' # path in the HDF5 file the data is at\n", + "h5path_ctrl = '/CONTROL/{}/CTRL/TCTRL'\n", "\n", "# for database time derivation:\n", "use_dir_creation_date = True # use dir creation date as data production reference date\n", @@ -52,9 +55,9 @@ "\n", "number_dark_frames = 0 # number of images to be used, if set to 0 all available images are used\n", "chunkSize = 100 # number of images to read per chunk\n", - "fix_temperature = 233. # fix temperature in K, set to 0. to use value from slow data\n", - "gain = 1 # the detector's gain setting, only 1 and 64 is available.\n", - "bias_voltage = 300 # detector's bias voltage\n", + "fix_temperature = 0. # fix temperature in K, set to 0. to use value from slow data\n", + "gain = 1 # the detector's gain setting; it is later read from the file and this value is overwritten\n", + "bias_voltage = 0. # the detector's bias voltage. Set to 0. to use value from slow data.\n", "integration_time = 70 # detector's integration time\n", "commonModeAxis = 0 # axis along which common mode will be calculated (0: along rows, 1: along columns)\n", "commonModeBlockSize = [512, 512] # size of the detector in pixels for common mode calculations\n", @@ -84,6 +87,7 @@ "import warnings\n", "warnings.filterwarnings('ignore')\n", "\n", + "import h5py\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline\n", @@ -107,17 +111,6 @@ "from cal_tools.enums import BadPixels" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Output Folder Creation:\n", - "if not os.path.exists(out_folder):\n", - " os.makedirs(out_folder)" - ] - }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -143,7 +136,7 @@ "outputs": [], "source": [ "proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]\n", - "file_loc = 'Proposal: {}, Run: {}'.format(proposal, run)\n", + "file_loc = f'Proposal: {proposal}, Run: {run}'\n", "print(\"File Location:\", file_loc)" ] }, @@ -171,6 +164,9 @@ "filename = fp_path.format(sequence)\n", "h5path = h5path.format(karabo_id, receiver_id)\n", "\n", + "# Output Folder Creation:\n", + "os.makedirs(out_folder, exist_ok=True)\n", "\n", "# Run's creation time:\n", "if creation_time:\n", " try:\n", @@ -196,6 +192,25 @@ "print(\"Run number: {}\".format(run))" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# extract slow data\n", + "if karabo_da_control:\n", + " ctrl_fname = os.path.join(ped_dir, path_template.format(run, karabo_da_control)).format(sequence)\n", + " ctrl_path = h5path_ctrl.format(karabo_id)\n", + " mdl_ctrl_path = f\"/CONTROL/{karabo_id}/MDL/\"\n", + " with h5py.File(ctrl_fname, \"r\") as f:\n", + " if bias_voltage == 0.:\n", + " bias_voltage = abs(f[os.path.join(mdl_ctrl_path, \"DAQ_MPOD/u0voltage/value\")][0])\n", + " gain = f[os.path.join(mdl_ctrl_path, \"DAQ_GAIN/pNCCDGain/value\")][0]\n", + " if fix_temperature == 0.:\n", + " fix_temperature = f[os.path.join(ctrl_path, \"inputA/krdg/value\")][0]" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Reading Parameters such as Detector Bias, Gain, etc. from the Data:\n", @@ -208,7 
+223,6 @@ "outputs": [], "source": [ "# Reading Parameters such as Detector Bias, Gain, etc. from the Data:\n", - "\n", "memoryCells = 1 # pnCCD has 1 memory cell\n", "sensorSize = [pixels_x, pixels_y]\n", "blockSize = [sensorSize[0]//2, sensorSize[1]//2]# sensor area will be analysed according to blocksize\n", @@ -225,21 +239,11 @@ "outputs": [], "source": [ "# Printing the Parameters Read from the Data File:\n", - "\n", "display(Markdown('### Detector Parameters'))\n", - "print(\"Bias voltage is {} V.\".format(bias_voltage))\n", - "print(\"Detector gain is set to {}.\".format(gain))\n", - "print(\"Detector integration time is set to {} ms\".format(integration_time)) \n", - "\n", - "if fix_temperature != 0.:\n", - " print(f\"Using a fixed temperature of {fix_temperature} K\")\n", - " temperature_k = fix_temperature\n", - "else:\n", - " print(\"Temperature is not fixed.\")\n", - " #TODO: remove this line after properly saving the temperature in control data.\n", - " temperature_k = 233.\n", - " print(f\"Using a fixed temperature of {fix_temperature} K\")\n", - " \n", + "print(f\"Bias voltage is {bias_voltage} V.\")\n", + "print(f\"Detector gain is set to {gain}.\")\n", + "print(f\"Detector integration time is set to {integration_time} ms\") \n", + "print(f\"Using a fixed temperature of {fix_temperature} K\")\n", "print(\"Number of dark images to analyze:\", nImages) " ] }, @@ -993,18 +997,22 @@ " det = Constants.CCD(DetectorTypes.pnCCD)\n", " const = getattr(det, const_name)()\n", " const.data = constant_maps[const_name].data\n", - "\n", " metadata.calibration_constant = const\n", "\n", " # set the operating condition\n", " condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n", " integration_time=integration_time,\n", " gain_setting=gain,\n", - " temperature=temperature_k,\n", + " temperature=fix_temperature,\n", " pixels_x=pixels_x,\n", " pixels_y=pixels_y)\n", "\n", - " device = Detectors.PnCCD1\n", + " for parm in condition.parameters:\n", + " if parm.name == \"Sensor Temperature\":\n", + " parm.lower_deviation = temp_limits\n", + " parm.upper_deviation = temp_limits\n", + "\n", + " device = getattr(Detectors, db_module)\n", " metadata.detector_condition = condition\n", "\n", " # specify the a version for this constant\n", @@ -1029,7 +1037,7 @@ "\n", "print(\"Generated constants with conditions:\\n\")\n", "print(f\"• bias_voltage: {bias_voltage}\\n• integration_time: {integration_time}\\n\"\n", - " f\"• gain_setting: {gain}\\n• temperature: {temperature_k}\\n\"\n", + " f\"• gain_setting: {gain}\\n• temperature: {fix_temperature}\\n\"\n", " f\"• creation_time: {creation_time}\\n\")" ] }, diff --git a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb index a41a37a6a61bf9716311114bf3bb961b1494edd1..1ea32328e9cb27fd6b3ebe97f92bad272869af16 100644 --- a/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb +++ b/notebooks/pnCCD/Correct_pnCCD_NBC.ipynb @@ -22,17 +22,20 @@ }, "outputs": [], "source": [ - "in_folder = \"/gpfs/exfel/exp/SQS/201930/p900075/raw\" # input folder\n", - "out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/pnccd' # output folder\n", - "run = 365 # which run to read data from\n", + "in_folder = \"/gpfs/exfel/exp/SQS/202022/p002720/raw\" # input folder\n", + "out_folder = '/gpfs/exfel/data/scratch/setoodeh' # output folder\n", + "run = 53 # which run to read data from\n", "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n", "\n", + "db_module = \"pnCCD_M205_M206\"\n", "karabo_da = 'PNCCD01' # data aggregators\n", + "karabo_da_control = \"PNCCD02\" # file inset for control data\n", "karabo_id = \"SQS_NQS_PNCCD1MP\" # karabo prefix of PNCCD devices\n", "receiver_id = \"PNCCD_FMT-0\" # inset for receiver devices\n", - "path_template = 'RAW-R{:04d}-PNCCD01-S{{:05d}}.h5' # the template to use to access data\n", + "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data\n", "path_template_seqs = \"{}/r{:04d}/*PNCCD01-S*.h5\"\n", "h5path = '/INSTRUMENT/{}/CAL/{}:output/data/' # path to data in the HDF5 file \n", + "h5path_ctrl = '/CONTROL/{}/CTRL/TCTRL'\n", "\n", "overwrite = True # keep this as True to not overwrite the output \n", "use_dir_creation_date = True # required to obtain creation time of the run\n", @@ -51,11 +54,11 @@ "seq_num = 0 # sequence number for which the last plot at the end of the notebook is plotted\n", "\n", "# pnCCD parameters:\n", - "fix_temperature = 233.\n", - "gain = 1\n", - "bias_voltage = 300\n", + "fix_temperature = 0. # fix temperature in K, set to 0. to use value from slow data.\n", + "gain = 0. # the detector's gain setting; it is later read from the file and this value is overwritten\n", + "bias_voltage = 0. # the detector's bias voltage. Set to 0. to use value from slow data.\n", "integration_time = 70\n", - "photon_energy = 1.6 # Al fluorescence in keV\n", + "photon_energy = 1.5 # Al fluorescence in keV\n", "\n", "cal_db_interface = \"tcp://max-exfl016:8015\" # calibration DB interface to use\n", "cal_db_timeout = 300000 # timeout on caldb requests\n", @@ -65,7 +68,7 @@ "common_mode = True # Apply common mode correction\n", "relgain = True # Apply relative gain correction\n", "cti = False # Apply charge transfer inefficiency correction (not implemented, yet)\n", - "do_pattern_classification = True # classify split events" + "do_pattern_classification = False # classify split events" ] }, { @@ -104,6 +107,7 @@ "import time\n", "import copy\n", "import os\n", + "import traceback\n", "import glob\n", "import datetime\n", "from datetime import timedelta\n", @@ -134,56 +138,6 @@ " sequences = None" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Each xcal.HistogramCalculator requires a total number of bins and a binning range. 
We define these using a \n", - "# dictionary:\n", - "\n", - "# For all xcal histograms:\n", - "if gain == 1:\n", - " Hist_Bin_Dict = {\n", - " \"bins\": 70000, # number of bins \n", - " \"bin_range\": [0, 70000]\n", - " }\n", - "\n", - " # For the numpy histograms on the last cell of the notebook:\n", - " Event_Bin_Dict = {\n", - " \"event_bins\": 1000, # number of bins \n", - " \"b_range\": [0, 50000] # bin range \n", - " }\n", - " \n", - "elif gain == 64:\n", - " # For all xcal histograms:\n", - " Hist_Bin_Dict = {\n", - " \"bins\": 25000, # number of bins \n", - " \"bin_range\": [0, 25000] \n", - " }\n", - " # For the numpy histograms on the last cell of the notebook:\n", - " Event_Bin_Dict = {\n", - " \"event_bins\": 1000, # number of bins \n", - " \"b_range\": [0, 3000] # bin range \n", - " }\n", - " \n", - "bins = Hist_Bin_Dict[\"bins\"]\n", - "bin_range = Hist_Bin_Dict[\"bin_range\"]\n", - "event_bins = Event_Bin_Dict[\"event_bins\"]\n", - "b_range = Event_Bin_Dict[\"b_range\"]\n", - "\n", - "# On the singles spectrum (uploaded in the middle of this notebook), the ADU values correspoding to the boundaries\n", - "# of the first peak region are used as cti_limit_low and cti_limit_high:\n", - "\n", - "if gain == 1:\n", - " cti_limit_low = 3000 # lower limit of cti\n", - " cti_limit_high = 10000 # higher limit of cti\n", - "elif gain == 64:\n", - " cti_limit_low = 50\n", - " cti_limit_high = 170" - ] - }, { "cell_type": "code", "execution_count": null, @@ -224,9 +178,15 @@ " creation_time = get_dir_creation_date(in_folder, run)\n", "\n", "\n", - "print(f\"Creation time: {creation_time}\")\n", - " \n", - "\n", + "print(f\"Creation time: {creation_time}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# Reading all sequences of the run:\n", "file_list = []\n", "total_sequences = 0\n", @@ -249,6 +209,48 @@ "print(f\"This run has a total number of {total_sequences} sequences.\\n\")" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# extract slow data\n", + "if karabo_da_control:\n", + " ctrl_fname = os.path.join(ped_dir, path_template.format(run, karabo_da_control)).format(sequences[0])\n", + " ctrl_path = h5path_ctrl.format(karabo_id)\n", + " mdl_ctrl_path = f\"/CONTROL/{karabo_id}/MDL/\"\n", + " try:\n", + " with h5py.File(ctrl_fname, \"r\") as f:\n", + " if bias_voltage == 0.:\n", + " bias_voltage = abs(f[os.path.join(mdl_ctrl_path, \"DAQ_MPOD/u0voltage/value\")][0])\n", + " gain = f[os.path.join(mdl_ctrl_path, \"DAQ_GAIN/pNCCDGain/value\")][0]\n", + " if fix_temperature == 0.:\n", + " fix_temperature = f[os.path.join(ctrl_path, \"inputA/krdg/value\")][0]\n", + " except KeyError:\n", + " print(\"Error !!! 
while extracting slow data\")\n", + " traceback.print_exc(limit=1)\n", + " print(\"Control file name:\", ctrl_fname)\n", + " print(\"bias voltage control h5path:\", os.path.join(mdl_ctrl_path, \"DAQ_MPOD/u0voltage/value\"))\n", + " print(\"gain control h5path:\", os.path.join(mdl_ctrl_path, \"DAQ_GAIN/pNCCDGain/value\"))\n", + " print(\"fix_temperature control h5path:\", os.path.join(ctrl_path, \"inputA/krdg/value\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Printing the Parameters Read from the Data File:\n", + "\n", + "display(Markdown('### Detector Parameters'))\n", + "print(\"Bias voltage is {} V.\".format(bias_voltage))\n", + "print(\"Detector gain is set to {}.\".format(gain))\n", + "print(\"Detector integration time is set to {} ms\".format(integration_time))\n", + "print(f\"Using a fixed temperature of {fix_temperature} K\")" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -297,6 +299,58 @@ " raise AttributeError(\"Output path exists! Exiting\") " ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Each xcal.HistogramCalculator requires a total number of bins and a binning range. We define these using a \n", + "# dictionary:\n", + "\n", + "# For all xcal histograms:\n", + "if gain == 1:\n", + " Hist_Bin_Dict = {\n", + " \"bins\": 35000, # number of bins \n", + " \"bin_range\": [0, 35000]\n", + " }\n", + "\n", + " # For the numpy histograms on the last cell of the notebook:\n", + " Event_Bin_Dict = {\n", + " \"event_bins\": 1000, # number of bins \n", + " \"b_range\": [0, 35000] # bin range \n", + " }\n", + " \n", + "#TODO: make it more adaptive for more than only 2 gains [below was for gain==64 only]\n", + "else:\n", + " # For all xcal histograms:\n", + " Hist_Bin_Dict = {\n", + " \"bins\": 25000, # number of bins \n", + " \"bin_range\": [0, 25000] \n", + " }\n", + " # For the numpy histograms on the last cell of the notebook:\n", + " Event_Bin_Dict = {\n", + " \"event_bins\": 1000, # number of bins \n", + " \"b_range\": [0, 5000] # bin range \n", + " }\n", + " \n", + "bins = Hist_Bin_Dict[\"bins\"]\n", + "bin_range = Hist_Bin_Dict[\"bin_range\"]\n", + "event_bins = Event_Bin_Dict[\"event_bins\"]\n", + "b_range = Event_Bin_Dict[\"b_range\"]\n", + "\n", + "# On the singles spectrum (uploaded in the middle of this notebook), the ADU values corresponding to the boundaries\n", + "# of the first peak region are used as cti_limit_low and cti_limit_high:\n", + "\n", + "if gain == 1:\n", + " cti_limit_low = 1000 # lower limit of cti\n", + " cti_limit_high = 100000 # higher limit of cti\n", + "#TODO: make it more adaptive for more than only 2 gains [below was for gain==64 only]\n", + "else:\n", + " cti_limit_low = 50\n", + " cti_limit_high = 2000" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -337,7 +391,7 @@ " \n", " for const in constants.keys():\n", " constants[const], when[const] = \\\n", - " get_constant_from_db_and_time(Detectors.PnCCD1,\n", + " get_constant_from_db_and_time(getattr(Detectors, db_module),\n", " getattr(Constants.CCD(DetectorTypes.pnCCD), const)(),\n", " condition,\n", " np.zeros((pixels_x, pixels_y, 1)),\n", @@ -408,7 +462,7 @@ " photon_energy=photon_energy)\n", "\n", " constants[\"RelativeGain\"], relgain_time = \\\n", - " get_constant_from_db_and_time(Detectors.PnCCD1,\n", + " get_constant_from_db_and_time(getattr(Detectors, db_module),\n", " Constants.CCD(DetectorTypes.pnCCD).RelativeGain(),\n", " 
condition,\n", " np.zeros((pixels_x, pixels_y)),\n", @@ -518,22 +572,24 @@ " cores=cpuCores,\n", " blockSize=blockSize)\n", " histCalCommonModeCor.debug()\n", + " \n", + "if corr_bools.get('pattern_class'):\n", "# Will contain split events pattern data:\n", - "histCalPcorr = xcal.HistogramCalculator(sensorSize, \n", - " bins=bins, \n", - " range=bin_range,\n", - " nCells=memoryCells, \n", - " cores=cpuCores,\n", - " blockSize=blockSize)\n", - "histCalPcorr.debug()\n", + " histCalPcorr = xcal.HistogramCalculator(sensorSize, \n", + " bins=bins, \n", + " range=bin_range,\n", + " nCells=memoryCells, \n", + " cores=cpuCores,\n", + " blockSize=blockSize)\n", + " histCalPcorr.debug()\n", "# Will contain singles events data:\n", - "histCalPcorrS = xcal.HistogramCalculator(sensorSize, \n", - " bins=bins, \n", - " range=bin_range,\n", - " nCells=memoryCells, \n", - " cores=cpuCores,\n", - " blockSize=blockSize)\n", - "histCalPcorrS.debug()\n", + " histCalPcorrS = xcal.HistogramCalculator(sensorSize, \n", + " bins=bins, \n", + " range=bin_range,\n", + " nCells=memoryCells, \n", + " cores=cpuCores,\n", + " blockSize=blockSize)\n", + " histCalPcorrS.debug()\n", "if corr_bools.get('relgain'):\n", " # Will contain gain corrected data:\n", " histCalGainCor = xcal.HistogramCalculator(sensorSize, \n", @@ -673,6 +729,22 @@ " offset_mean_im = np.nanmean(data, axis=2) \n", " offset_single_im = data[...,0] # The offset corrected image corresponding to the first frame \n", " \n", + " # cm: common mode, c: classifications, p: even patterns\n", + " if corr_bools.get('common_mode'):\n", + " ddsetcm = ofile.create_dataset(h5path+\"/pixels_cm\",\n", + " oshape,\n", + " chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", + " dtype=np.float32)\n", + " \n", + " data = cmCorrection.correct(data.astype(np.float32), # common mode correction\n", + " cellTable=np.zeros(data.shape[2], np.int32)) \n", + " histCalCommonModeCor.fill(data) # filling histogram with common mode corrected data\n", + " # common mode corrected images:\n", + " if cm_mean_im is None:\n", + " cm_mean_im = np.nanmean(data, axis=2) \n", + " cm_single_im = data[...,0] # The common mode corrected image corresponding to the first frame \n", + " ddsetcm[...] = np.moveaxis(data, 2, 0)\n", + " \n", " if corr_bools.get('relgain'):\n", " data /= rg # relative gain correction \n", " histCalGainCor.fill(data) # filling histogram with gain corrected data\n", @@ -685,12 +757,6 @@ "\n", " if corr_bools.get('pattern_class'):\n", "\n", - " # cm: common mode, c: classifications, p: even patterns\n", - " if corr_bools.get('common_mode'):\n", - " ddsetcm = ofile.create_dataset(h5path+\"/pixels_cm\",\n", - " oshape,\n", - " chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", - " dtype=np.float32)\n", "\n", " ddsetc = ofile.create_dataset(h5path+\"/pixels_classified\",\n", " oshape,\n", @@ -705,16 +771,6 @@ " # The calculation of the cluster map:\n", " patternClassifierLH._noisemap = noise[:, :pixels_x//2, :]\n", " patternClassifierRH._noisemap = noise[:, pixels_x//2:, :]\n", - " if corr_bools.get('common_mode'):\n", - " data = cmCorrection.correct(data.astype(np.float32), # common mode correction\n", - " cellTable=np.zeros(data.shape[2], np.int32)) \n", - " histCalCommonModeCor.fill(data) # filling histogram with common mode corrected data\n", - " \n", - " if cm_mean_im is None:\n", - " cm_mean_im = np.nanmean(data, axis=2) \n", - " cm_single_im = data[...,0] # The common mode corrected image corresponding to the first frame \n", - " \n", - " ddsetcm[...] 
= np.moveaxis(data, 2, 0)\n", "\n", " # Dividing the data into left and right hemispheres:\n", " dataLH = data[:, :pixels_x//2, :]\n", @@ -771,8 +827,9 @@ " cm_cor_HistVals, _, cm_HistMids, _ = histCalCommonModeCor.get()\n", "if corr_bools.get('relgain'):\n", " gain_cor_HistVals, _, gain_cor_HistMids, _ = histCalGainCor.get()\n", - "split_HistVals, _, split_HistMids, _ = histCalPcorr.get() # split events corrected\n", - "singles_HistVals, _, singles_HistMids, _ = histCalPcorrS.get() # last s in variable names: singles events" + "if corr_bools.get('pattern_class'):\n", + " split_HistVals, _, split_HistMids, _ = histCalPcorr.get() # split events corrected\n", + " singles_HistVals, _, singles_HistMids, _ = histCalPcorrS.get() # last s in variable names: singles events" ] }, { @@ -789,8 +846,9 @@ " np.savez(os.path.join(out_folder, 'Common_Mode_Corrected_Events.npz'), cm_HistMids, cm_cor_HistVals)\n", "if corr_bools.get('relgain'):\n", " np.savez(os.path.join(out_folder, 'Gain_Corrected_Events.npz'), gain_cor_HistMids, gain_cor_HistVals)\n", - "np.savez(os.path.join(out_folder, 'Split_Events.npz'), split_HistMids, split_HistVals)\n", - "np.savez(os.path.join(out_folder, 'Singles_Events.npz'), singles_HistMids, singles_HistVals)\n", + "if corr_bools.get('pattern_class'):\n", + " np.savez(os.path.join(out_folder, 'Split_Events.npz'), split_HistMids, split_HistVals)\n", + " np.savez(os.path.join(out_folder, 'Singles_Events.npz'), singles_HistMids, singles_HistVals)\n", "\n", "print(\"Various spectra are saved to disk in the form of histograms. Please check {}\".format(out_folder))" ] }, @@ -805,9 +863,10 @@ "# good.\n", "\n", "if gain == 1:\n", - " x_range = (0, 30000)\n", - "elif gain == 64:\n", - " x_range = (0, 1000)" + " x_range = (0, 35000)\n", + "#TODO: make it more adaptive for more than only 2 gains [below was for gain==64 only]\n", + "else:\n", + " x_range = (0, 2000)" ] }, { diff --git a/webservice/sqlite_view.py b/webservice/sqlite_view.py new file mode 100644 index 0000000000000000000000000000000000000000..2979f2223957b9806af7fd3649c52dd26187c64a --- /dev/null +++ b/webservice/sqlite_view.py @@ -0,0 +1,17 @@ +import sqlite3 + + +file_path = "/home/xcal/calibration_webservice/webservice/webservice_jobs.sqlite" +run = '247' +proposal = '900138' + + +conn = sqlite3.connect(file_path) +c = conn.cursor() + +c.execute("SELECT * FROM jobs") + +for r in c.fetchall(): + rid, jobid, db_proposal, db_run, status, time, _, _ = r + if db_proposal == proposal and db_run == run: + print(r) diff --git a/webservice/update_mdc.py b/webservice/update_mdc.py index 2200972a25a98ff9ba6cce4a48eda1584d63f170..36eb34b93ba2861cc1cb5a16e1213ca8a44c326f 100644 --- a/webservice/update_mdc.py +++ b/webservice/update_mdc.py @@ -1,17 +1,17 @@ +import yaml import argparse from metadata_client.metadata_client import MetadataClient -import yaml + parser = argparse.ArgumentParser( - description='Update run status at MDC for a given run id.') -parser.add_argument('--conf-file', type=str, help='Path to webservice config', - default='/home/xcal/calibration_webservice_deployed/webservice/webservice.yaml') # noqa -parser.add_argument('--flg', type=str, choices=["NA", "R", "A"], - help='Status flag for MDC request: NA - not available, R - running, A - available.') # noqa + description='Update run status at MDC for a given run id.
') +parser.add_argument('--conf-file', type=str, help='Path to webservice config', default='/home/xcal/calibration_webservice_deployed/webservice/webservice.yaml') +parser.add_argument('--flg', type=str, choices=["NA", "R", "A"], help='Status flag for MDC request') parser.add_argument('--rid', type=int, help='Run id from MDC') -parser.add_argument('--msg', type=str, help='Message string to MDC', - default='Error while job submission') +parser.add_argument('--msg', type=str, help='Message string to MDC') + + args = vars(parser.parse_args()) conf_file = args['conf_file'] @@ -19,23 +19,23 @@ rid = args['rid'] flg = args['flg'] msg = args['msg'] + with open(conf_file, "r") as f: config = yaml.load(f.read(), Loader=yaml.FullLoader) mdconf = config['metadata-client'] client_conn = MetadataClient(client_id=mdconf['user-id'], - client_secret=mdconf['user-secret'], - user_email=mdconf['user-email'], - token_url=mdconf['token-url'], - refresh_url=mdconf['refresh-url'], - auth_url=mdconf['auth-url'], - scope=mdconf['scope'], - base_api_url=mdconf['base-api-url']) + client_secret=mdconf['user-secret'], + user_email=mdconf['user-email'], + token_url=mdconf['token-url'], + refresh_url=mdconf['refresh-url'], + auth_url=mdconf['auth-url'], + scope=mdconf['scope'], + base_api_url=mdconf['base-api-url']) + response = client_conn.update_run_api(rid, {'flg_cal_data_status': flg, 'cal_pipeline_reply': msg}) -if response.status_code == 200: - print('Run is updated') -else: - print(f'Update failed {response}') +print(response, response.status_code) + diff --git a/webservice/webservice.py b/webservice/webservice.py index faa9d8b0ab58fdc9fe5aa09431f5d52b4fe7076a..7cdecaca6ff948a3f07f2f3e5343d2c85a18c6d7 100644 --- a/webservice/webservice.py +++ b/webservice/webservice.py @@ -706,10 +706,10 @@ for karabo_id in karabo_ids: if karabo_das[0] == 'all': - karabo_das = data_conf[karabo_id]["karabo-da"] + karabo_da = data_conf[karabo_id]["karabo-da"] # Check if any files for given karabo-das exists - if await check_files(in_folder, wait_runs, karabo_das): + if await check_files(in_folder, wait_runs, karabo_da): thisconf = copy.copy(data_conf[karabo_id]) if (karabo_id in pconf and @@ -720,7 +720,7 @@ thisconf["out-folder"] = '/'.join((out_folder, karabo_id.replace('-', '_'))) thisconf["karabo-id"] = karabo_id - thisconf["karabo-da"] = karabo_das + thisconf["karabo-da"] = karabo_da run_config = [] for typ, run in run_mapping.items(): diff --git a/xfel_calibrate/calibrate.py b/xfel_calibrate/calibrate.py index b84de4027c6076fe7bd4bd31c232b12150f228c0..316ff2dfa70788b860efdc454f073c5b2f8acd89 100755 --- a/xfel_calibrate/calibrate.py +++ b/xfel_calibrate/calibrate.py @@ -524,7 +524,7 @@ def create_finalize_script(fmt_args, temp_path, job_list): tmpl = Template(''' #!/bin/tcsh source /etc/profile.d/modules.sh - module load texlive + module load texlive/2019 echo 'Running finalize script' python3 -c "from xfel_calibrate.finalize import finalize; finalize(joblist={{joblist}}, diff --git a/xfel_calibrate/notebooks.py b/xfel_calibrate/notebooks.py index 494d3686fdc36fe2bbe7c02912d42e00a1ceac66..9b11236bd20fd7aa08d771ebaee07aef7c67509f 100644 --- a/xfel_calibrate/notebooks.py +++ b/xfel_calibrate/notebooks.py @@ -163,7 +163,7 @@ notebooks = { "notebook": "notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb", # noqa "concurrency": {"parameter": "karabo_da", - "default concurrency": None, + 
"default concurrency": list(range(6)), "cluster cores": 4}, }, "CORRECT": {