From ef0a008aebdeb4c203f2bbba4fc7903d106bb711 Mon Sep 17 00:00:00 2001
From: David Hammer <dhammer@mailbox.org>
Date: Mon, 15 Mar 2021 15:45:04 +0100
Subject: [PATCH] Flake8 and related fixes

- trailing whitespace
- unused variables
- unused imports
- misc formatting
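
For reference, a minimal sketch of checking the same categories with flake8's
legacy Python API, assuming the notebook code has first been exported to a
plain .py file (e.g. with `jupyter nbconvert --to script`). The path and the
selected codes below are illustrative only, not part of this patch:

    # Sketch only: run flake8 programmatically on an exported copy of the notebook code.
    from flake8.api.legacy import get_style_guide

    # Codes matching the categories above:
    #   W291/W293 - trailing whitespace, F841 - unused local variable,
    #   F401 - unused import, E1/E2/E3 - misc formatting (pycodestyle).
    style_guide = get_style_guide(select=["W291", "W293", "F841", "F401", "E1", "E2", "E3"])
    report = style_guide.check_files(["AGIPD_Correct_and_Verify.py"])  # hypothetical exported script
    print(f"remaining findings: {report.total_errors}")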
---
 .../AGIPD/AGIPD_Correct_and_Verify.ipynb      | 197 ++++++------------
 ...IPD_Retrieve_Constants_Precorrection.ipynb |   6 +-
 2 files changed, 68 insertions(+), 135 deletions(-)

diff --git a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
index 82f22b590..f13e87851 100644
--- a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
+++ b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
@@ -14,12 +14,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-21T11:30:06.730220Z",
-     "start_time": "2019-02-21T11:30:06.658286Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "in_folder = \"/gpfs/exfel/exp/HED/202031/p900174/raw\" # the folder to read data from, required\n",
@@ -51,7 +46,7 @@
     "gain_setting = 0.1 # the gain setting, use 0.1 to try to auto-determine\n",
     "photon_energy = 9.2 # photon energy in keV\n",
     "overwrite = True # set to True if existing data should be overwritten\n",
-    "max_pulses = [0, 500, 1] # range list [st, end, step] of maximum pulse indices within a train. 3 allowed maximum list input elements.   \n",
+    "max_pulses = [0, 500, 1] # range list [st, end, step] of maximum pulse indices within a train. 3 allowed maximum list input elements.\n",
     "mem_cells_db = 0 # set to a value different than 0 to use this value for DB queries\n",
     "cell_id_preview = 1 # cell Id used for preview in single-shot plots\n",
     "\n",
@@ -71,7 +66,7 @@
     "xray_gain = False # do relative gain correction based on xray data\n",
     "blc_noise = False # if set, baseline correction via noise peak location is attempted\n",
     "blc_stripes = False # if set, baseline corrected via stripes\n",
-    "blc_hmatch = False # if set, base line correction via histogram matching is attempted \n",
+    "blc_hmatch = False # if set, base line correction via histogram matching is attempted\n",
     "match_asics = False # if set, inner ASIC borders are matched to the same signal level\n",
     "adjust_mg_baseline = False # adjust medium gain baseline to match highest high gain value\n",
     "zero_nans = False # set NaN values in corrected data to 0\n",
@@ -104,8 +99,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import copy\n",
-    "import gc\n",
     "import itertools\n",
     "import math\n",
     "import re\n",
@@ -114,11 +107,11 @@
     "from datetime import timedelta\n",
     "from multiprocessing import Pool\n",
     "from pathlib import Path\n",
-    "from time import perf_counter, sleep, time\n",
+    "from time import perf_counter\n",
     "\n",
     "import tabulate\n",
     "from dateutil import parser\n",
-    "from IPython.display import HTML, Latex, Markdown, display\n",
+    "from IPython.display import Latex, Markdown, display\n",
     "\n",
     "warnings.filterwarnings('ignore')\n",
     "import matplotlib\n",
@@ -126,11 +119,8 @@
     "import yaml\n",
     "from extra_data import RunDirectory, stack_detector_data\n",
     "from extra_geom import AGIPD_1MGeometry, AGIPD_500K2GGeometry\n",
-    "from iCalibrationDB import Detectors\n",
     "from matplotlib import cm as colormap\n",
     "from matplotlib.colors import LogNorm\n",
-    "from matplotlib.ticker import FormatStrFormatter, LinearLocator\n",
-    "from mpl_toolkits.mplot3d import Axes3D\n",
     "\n",
     "matplotlib.use(\"agg\")\n",
     "%matplotlib inline\n",
@@ -148,8 +138,7 @@
     "from cal_tools.cython import agipdalgs as calgs\n",
     "from cal_tools.enums import AgipdGainMode, BadPixels\n",
     "from cal_tools.step_timing import StepTimer\n",
-    "from cal_tools.tools import (CalibrationMetadata, get_dir_creation_date,\n",
-    "                             map_modules_from_folder, module_index_to_qm)\n",
+    "from cal_tools.tools import (get_dir_creation_date, map_modules_from_folder, module_index_to_qm)\n",
     "\n",
     "sns.set()\n",
     "sns.set_context(\"paper\", font_scale=1.4)\n",
@@ -255,7 +244,7 @@
     "    karabo_da = [\"AGIPD{:02d}\".format(i) for i in modules]\n",
     "else:\n",
     "    modules = [int(x[-2:]) for x in karabo_da]\n",
-    "    \n",
+    "\n",
     "print(\"Process modules: \", ', '.join(\n",
     "    [module_index_to_qm(x) for x in modules]))\n",
     "print(f\"Detector in use is {karabo_id}\")\n",
@@ -270,17 +259,17 @@
    "outputs": [],
    "source": [
     "# Display Information about the selected pulses indices for correction.\n",
-    "pulses_lst = list(range(*max_pulses)) if not (len(max_pulses)==1 and max_pulses[0]==0) else max_pulses  \n",
+    "pulses_lst = list(range(*max_pulses)) if not (len(max_pulses)==1 and max_pulses[0]==0) else max_pulses\n",
     "\n",
     "try:\n",
-    "    if len(pulses_lst) > 1:        \n",
+    "    if len(pulses_lst) > 1:\n",
     "        print(\"A range of {} pulse indices is selected: from {} to {} with a step of {}\"\n",
     "               .format(len(pulses_lst), pulses_lst[0] , pulses_lst[-1] + (pulses_lst[1] - pulses_lst[0]),\n",
     "                       pulses_lst[1] - pulses_lst[0]))\n",
     "    else:\n",
-    "        print(\"one pulse is selected: a pulse of idx {}\".format(pulses_lst[0]))\n",
+    "        print(f\"one pulse is selected: a pulse of idx {pulses_lst[0]}\")\n",
     "except Exception as e:\n",
-    "    raise ValueError('max_pulses input Error: {}'.format(e))"
+    "    raise ValueError(f\"max_pulses input Error: {e}\")"
    ]
   },
   {
@@ -352,7 +341,7 @@
     "    delta = timedelta(hours=offset.hour,\n",
     "                      minutes=offset.minute, seconds=offset.second)\n",
     "    creation_time += delta\n",
-    "    \n",
+    "\n",
     "# Evaluate gain setting\n",
     "if gain_setting == 0.1:\n",
     "    if creation_time.replace(tzinfo=None) < parser.parse('2020-01-31'):\n",
@@ -434,11 +423,10 @@
     "def retrieve_constants(mod):\n",
     "    \"\"\"\n",
     "    Retrieve calibration constants and load them to shared memory\n",
-    "    \n",
+    "\n",
     "    Metadata for constants is taken from yml file or retrieved from the DB\n",
     "    \"\"\"\n",
-    "    err = ''\n",
-    "    # TODO: parallelize over modules\n",
+    "    err = \"\"\n",
     "    k_da = karabo_da[mod]\n",
     "    try:\n",
     "        # check if there is a yaml file in out_folder that has the device constants.\n",
@@ -446,8 +434,19 @@
     "            when = agipd_corr.initialize_from_yaml(k_da, const_yaml, mod)\n",
     "        else:\n",
     "            # TODO: should we save what is found here in metadata?\n",
-    "            when = agipd_corr.initialize_from_db(karabo_id, k_da, cal_db_interface, creation_time, mem_cells_db, bias_voltage,\n",
-    "                                                 photon_energy, gain_setting, acq_rate, mod, False)\n",
+    "            when = agipd_corr.initialize_from_db(\n",
+    "                karabo_id,\n",
+    "                k_da,\n",
+    "                cal_db_interface,\n",
+    "                creation_time,\n",
+    "                mem_cells_db,\n",
+    "                bias_voltage,\n",
+    "                photon_energy,\n",
+    "                gain_setting,\n",
+    "                acq_rate,\n",
+    "                mod,\n",
+    "                False,\n",
+    "            )\n",
     "    except Exception as e:\n",
     "        err = f\"Error: {e}\\nError traceback: {traceback.format_exc()}\"\n",
     "        when = None\n",
@@ -494,7 +493,7 @@
    "source": [
     "def imagewise_chunks(img_counts):\n",
     "    \"\"\"Break up the loaded data into chunks of up to chunk_size\n",
-    "    \n",
+    "\n",
     "    Yields (file data slot, start index, stop index)\n",
     "    \"\"\"\n",
     "    for i_proc, n_img in enumerate(img_counts):\n",
@@ -515,9 +514,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "with Pool() as pool:\n",
@@ -546,7 +543,7 @@
     "            # Perform image-wise correction\n",
     "            pool.starmap(agipd_corr.baseline_correction, imagewise_chunks(img_counts))\n",
     "            step_timer.done_step(\"Base-line shift correction\")\n",
-    "        \n",
+    "\n",
     "        if common_mode:\n",
     "            # Perform cross-file correction parallel over asics\n",
     "            pool.starmap(agipd_corr.cm_correction, itertools.product(\n",
@@ -584,9 +581,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# if the yml file contains \"retrieved-constants\", that means a leading\n",
@@ -604,7 +599,7 @@
     "        if fst_print:\n",
     "            print(\"Constants are retrieved with creation time: \")\n",
     "            fst_print = False\n",
-    "    \n",
+    "\n",
     "        module_timestamps = {}\n",
     "\n",
     "        # If correction is crashed\n",
@@ -652,8 +647,7 @@
     "    Z = data.T\n",
     "\n",
     "    # Plot the surface.\n",
-    "    surf = ax.plot_surface(X, Y, Z, cmap=colormap.coolwarm,\n",
-    "                           linewidth=0, antialiased=False)\n",
+    "    ax.plot_surface(X, Y, Z, cmap=colormap.coolwarm, linewidth=0, antialiased=False)\n",
     "    ax.set_xlabel(x_axis)\n",
     "    ax.set_ylabel(y_axis)\n",
     "    ax.set_zlabel(\"Counts\")\n",
@@ -680,7 +674,7 @@
    "source": [
     "def get_trains_data(run_folder, source, include, detector_id, tid=None, modules=16, fillvalue=np.nan):\n",
     "    \"\"\"Load single train for all module\n",
-    "    \n",
+    "\n",
     "    :param run_folder: Path to folder with data\n",
     "    :param source: Data source to be loaded\n",
     "    :param include: Inset of file name to be considered\n",
@@ -693,7 +687,7 @@
     "        tid, data = run_data.select(f'{detector_id}/DET/*', source).train_from_id(tid)\n",
     "    else:\n",
     "        tid, data = next(iter(run_data.select(f'{detector_id}/DET/*', source).trains(require_all=True)))\n",
-    "        \n",
+    "\n",
     "    return tid, stack_detector_data(train=data, data=source, fillvalue=fillvalue, modules=modules)"
    ]
   },
@@ -791,7 +785,7 @@
     "print(f\"Gain statistics in %\")\n",
     "table = [[f'{gains[gains==0].size/gains.size*100:.02f}',\n",
     "          f'{gains[gains==1].size/gains.size*100:.03f}',\n",
-    "          f'{gains[gains==2].size/gains.size*100:.03f}']] \n",
+    "          f'{gains[gains==2].size/gains.size*100:.03f}']]\n",
     "md = display(Latex(tabulate.tabulate(table, tablefmt='latex',\n",
     "                                     headers=[\"High\", \"Medium\", \"Low\"])))"
    ]
@@ -806,9 +800,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "pulse_range = [np.min(pulseId[pulseId>=0]), np.max(pulseId[pulseId>=0])]\n",
@@ -882,12 +874,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:33.226396Z",
-     "start_time": "2019-02-18T17:29:27.027758Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
@@ -923,12 +910,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:33.761015Z",
-     "start_time": "2019-02-18T17:29:33.227922Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
@@ -941,24 +923,19 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:35.903487Z",
-     "start_time": "2019-02-18T17:29:33.762568Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
     "ax = fig.add_subplot(111)\n",
     "vmin, vmax = get_range(corrected[cell_id_preview], 5, -50)\n",
     "nbins = np.int((vmax + 50) / 2)\n",
-    "h = ax.hist(corrected[cell_id_preview].flatten(), \n",
-    "            bins=nbins, range=(-50, vmax), \n",
+    "h = ax.hist(corrected[cell_id_preview].flatten(),\n",
+    "            bins=nbins, range=(-50, vmax),\n",
     "            histtype='stepfilled', log=True)\n",
-    "_ = plt.xlabel('[ADU]')\n",
-    "_ = plt.ylabel('Counts')\n",
-    "_ = ax.grid()"
+    "plt.xlabel('[ADU]')\n",
+    "plt.ylabel('Counts')\n",
+    "ax.grid()"
    ]
   },
   {
@@ -968,18 +945,13 @@
    "outputs": [],
    "source": [
     "display(Markdown('### Mean CORRECTED Preview ###\\n'))\n",
-    "display(Markdown(f'A mean across one train \\n'))"
+    "display(Markdown(f'A mean across one train\\n'))"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:39.369686Z",
-     "start_time": "2019-02-18T17:29:35.905152Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
@@ -992,12 +964,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:49.217848Z",
-     "start_time": "2019-02-18T17:29:39.371232Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
@@ -1009,16 +976,16 @@
     "nbins = np.int((vmax + 100) / 5)\n",
     "h = ax.hist(corrected.flatten(), bins=nbins,\n",
     "            range=(-100, vmax), histtype='step', log=True, label = 'All')\n",
-    "_ = ax.hist(corrected[gains == 0].flatten(), bins=nbins, range=(-100, vmax),\n",
-    "            alpha=0.5, log=True, label='High gain', color='green')\n",
-    "_ = ax.hist(corrected[gains == 1].flatten(), bins=nbins, range=(-100, vmax),\n",
-    "            alpha=0.5, log=True, label='Medium gain', color='red')\n",
-    "_ = ax.hist(corrected[gains == 2].flatten(), bins=nbins,\n",
-    "            range=(-100, vmax), alpha=0.5, log=True, label='Low gain', color='yellow')\n",
-    "_ = ax.legend()\n",
-    "_ = ax.grid()\n",
-    "_ = plt.xlabel('[ADU]')\n",
-    "_ = plt.ylabel('Counts')"
+    "ax.hist(corrected[gains == 0].flatten(), bins=nbins, range=(-100, vmax),\n",
+    "        alpha=0.5, log=True, label='High gain', color='green')\n",
+    "ax.hist(corrected[gains == 1].flatten(), bins=nbins, range=(-100, vmax),\n",
+    "        alpha=0.5, log=True, label='Medium gain', color='red')\n",
+    "ax.hist(corrected[gains == 2].flatten(), bins=nbins, range=(-100, vmax),\n",
+    "        alpha=0.5, log=True, label='Low gain', color='yellow')\n",
+    "ax.legend()\n",
+    "ax.grid()\n",
+    "plt.xlabel('[ADU]')\n",
+    "plt.ylabel('Counts')"
    ]
   },
   {
@@ -1034,12 +1001,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:49.641675Z",
-     "start_time": "2019-02-18T17:29:49.224167Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
@@ -1050,9 +1012,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "source": [
     "## Bad Pixels ##\n",
     "The mask contains dedicated entries for all pixels and memory cells as well as all three gains stages. Each mask entry is encoded in 32 bits as:"
@@ -1061,12 +1021,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:49.651913Z",
-     "start_time": "2019-02-18T17:29:49.643556Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "table = []\n",
@@ -1089,24 +1044,17 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:50.086169Z",
-     "start_time": "2019-02-18T17:29:49.653391Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
     "ax = fig.add_subplot(111)\n",
-    "ax = geom.plot_data_fast(np.log2(mask[cell_id_preview]), ax=ax, vmin=0, vmax=32, cmap=\"jet\")"
+    "geom.plot_data_fast(np.log2(mask[cell_id_preview]), ax=ax, vmin=0, vmax=32, cmap=\"jet\")"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "source": [
     "### Percentage of Bad Pixels across one train  ###"
    ]
@@ -1114,18 +1062,12 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:51.686562Z",
-     "start_time": "2019-02-18T17:29:50.088883Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
     "ax = fig.add_subplot(111)\n",
-    "ax = geom.plot_data_fast(np.mean(mask>0, axis=0),\n",
-    "                         vmin=0, ax=ax, vmax=1, cmap=\"jet\")"
+    "geom.plot_data_fast(np.mean(mask>0, axis=0), vmin=0, ax=ax, vmax=1, cmap=\"jet\")"
    ]
   },
   {
@@ -1138,12 +1080,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "ExecuteTime": {
-     "end_time": "2019-02-18T17:29:55.483270Z",
-     "start_time": "2019-02-18T17:29:53.664226Z"
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "fig = plt.figure(figsize=(20, 10))\n",
diff --git a/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb b/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
index 73e3ed6d4..f6b40e513 100644
--- a/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
+++ b/notebooks/AGIPD/AGIPD_Retrieve_Constants_Precorrection.ipynb
@@ -133,10 +133,6 @@
     "print(f\"Outputting to {out_folder}\")\n",
     "out_folder.mkdir(parents=True, exist_ok=True)\n",
     "\n",
-    "import warnings\n",
-    "\n",
-    "warnings.filterwarnings('ignore')\n",
-    "\n",
     "melt_snow = False if corr_bools[\"only_offset\"] else agipdlib.SnowResolution.NONE"
    ]
   },
@@ -380,7 +376,7 @@
     "          ', '.join([tools.module_index_to_qm(x) for x in modules]))\n",
     "    print(f\"Operating conditions are:\")\n",
     "    print(f\"• Bias voltage: {bias_voltage}\")\n",
-    "    print(f\"• Memory cells: {max_cells}\\n\")\n",
+    "    print(f\"• Memory cells: {max_cells}\")\n",
     "    print(f\"• Acquisition rate: {acq_rate}\")\n",
     "    print(f\"• Gain mode: {gain_mode.name}\")\n",
     "    print(f\"• Gain setting: {gain_setting}\")\n",
-- 
GitLab