diff --git a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb index 00da5997abba27a77fd5aa54890785ae082ef76c..f1c6c165d58328b8be8e5fbc215825c9d6309f52 100644 --- a/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb +++ b/notebooks/Jungfrau/Jungfrau_Gain_Correct_and_Verify_NBC.ipynb @@ -17,37 +17,35 @@ "metadata": {}, "outputs": [], "source": [ - "cluster_profile = \"noDB\" # cluster profile to use\n", - "in_folder = \"/gpfs/exfel/exp/FXE/201901/p002210/raw\" # the folder to read data from, required\n", - "out_folder = \"/gpfs/exfel/data/scratch/ahmedk/test/jf\" # the folder to output to, required\n", - "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n", - "run = 249 # runs to process, required\n", - "\n", - "karabo_id = \"FXE_XAD_JF1M\" # karabo prefix of Jungfrau devices\n", - "karabo_da = ['JNGFR01'] # data aggregators\n", - "receiver_id = \"RECEIVER-{}\" # inset for receiver devices\n", - "receiver_control_id = \"CONTROL\" # inset for control devices\n", - "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # template to use for file name, double escape sequence number\n", + "in_folder = \"/gpfs/exfel/exp/CALLAB/202031/p900113/raw\" # the folder to read data from, required\n", + "out_folder = \"/gpfs/exfel/data/scratch/hammerd/issue-242\" # the folder to output to, required\n", + "sequences = [-1] # sequences to correct, set to [-1] for all, range allowed\n", + "run = 9979 # run to process, required\n", + "\n", + "karabo_id = \"SPB_IRDA_JF4M\" # karabo prefix of Jungfrau devices\n", + "karabo_da = ['JNGFR01'] # data aggregators\n", + "receiver_id = \"JNGFR{:02d}\" # inset for receiver devices\n", + "receiver_control_id = \"CONTROL\" # inset for control devices\n", + "path_template = 'RAW-R{:04d}-{}-S{:05d}.h5' # template to use for file name\n", "h5path = '/INSTRUMENT/{}/DET/{}:daqOutput/data' # path in H5 file under which images are located\n", - "h5path_run = '/RUN/{}/DET/{}' # path to run data\n", - "h5path_cntrl = '/CONTROL/{}/DET/{}' # path to control data\n", + "h5path_run = '/RUN/{}/DET/{}' # path to run data\n", + "h5path_cntrl = '/CONTROL/{}/DET/{}' # path to control data\n", "karabo_id_control = \"\" # if control is on a different ID, set to empty string if it is the same as karabo-id\n", - "karabo_da_control = \"JNGFR01\" # file inset for control data\n", + "karabo_da_control = \"JNGFRCTRL00\" # file inset for control data\n", "\n", - "use_dir_creation_date = True # use the creation data of the input dir for database queries\n", - "cal_db_interface = \"tcp://max-exfl016:8017#8025\" #\"tcp://max-exfl016:8015#8025\" # the database interface to use\n", - "cal_db_timeout = 180000 # timeout on caldb requests\",\n", + "use_dir_creation_date = True # use the creation date of the input dir for database queries\n", + "cal_db_interface = \"tcp://max-exfl016:8017#8025\" # the database interface to use\n", + "cal_db_timeout = 180000 # timeout on caldb requests\n", "\n", - "overwrite = True # set to True if existing data should be overwritten\n", - "no_relative_gain = False # do not do relative gain correction\n", - "bias_voltage = 180 # will be overwritten by value in file\n", - "sequences_per_node = 5 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n", - "photon_energy = 9.2 # photon energy in keV\n", + "overwrite = True # set to True if existing data should be overwritten\n", + "no_relative_gain = False # do not do relative gain 
correction\n", + "bias_voltage = 180 # will be overwritten by value in file\n", + "sequences_per_node = 5 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n", + "photon_energy = 9.2 # photon energy in keV\n", "chunk_size_idim = 1 # chunking size of imaging dimension, adjust if user software is sensitive to this.\n", - "integration_time = 4.96 # integration time in us, will be overwritten by value in file\n", - "mem_cells = 0. # leave memory cells equal 0, as it is saved in control information starting 2019.\n", - "gmapfile = \"\" # variable is not used but left here for back compatibility\n", - "db_module = [\"Jungfrau_M233\"] # ID of module in calibration database\n", + "integration_time = 4.96 # integration time in us, will be overwritten by value in file\n", + "mem_cells = 0 # leave memory cells equal 0, as it is saved in control information starting 2019.\n", + "db_module = [\"Jungfrau_M275\"] # ID of module in calibration database\n", "manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values\n", "chunk_size = 0\n", "\n", @@ -62,23 +60,25 @@ "metadata": {}, "outputs": [], "source": [ + "import copy\n", + "import multiprocessing\n", "import time\n", - "from ipyparallel import Client\n", + "import warnings\n", "from functools import partial\n", - "import tabulate\n", - "from IPython.display import display, Latex\n", - "import copy\n", + "from pathlib import Path\n", + "\n", "import h5py\n", - "import os\n", - "from cal_tools.tools import (map_modules_from_folder, get_dir_creation_date,\n", - " get_constant_from_db_and_time)\n", - "from iCalibrationDB import (ConstantMetaData, Constants, Conditions, Detectors, Versions)\n", - "from cal_tools.enums import BadPixels\n", - "import numpy as np\n", "import matplotlib\n", "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import tabulate\n", + "from cal_tools.enums import BadPixels\n", + "from cal_tools.tools import (get_constant_from_db_and_time,\n", + " get_dir_creation_date, map_modules_from_folder)\n", + "from iCalibrationDB import Conditions, Constants\n", + "from IPython.display import Latex, display\n", "from matplotlib.colors import LogNorm\n", - "import warnings\n", + "\n", "warnings.filterwarnings('ignore')\n", "\n", "matplotlib.use('agg')\n", @@ -91,23 +91,18 @@ "metadata": {}, "outputs": [], "source": [ - "client = Client(profile=cluster_profile)\n", - "view = client[:]\n", - "view.use_dill()\n", - "\n", + "in_folder = Path(in_folder)\n", + "out_folder = Path(out_folder)\n", + "ped_dir = in_folder / f'r{run:04d}'\n", "h5path = h5path.format(karabo_id, receiver_id)\n", - "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n", "\n", - "if ped_dir[-1] == \"/\":\n", - " ped_dir = ped_dir[:-1]\n", - "\n", - "if not os.path.exists(out_folder):\n", - " os.makedirs(out_folder)\n", - "elif not overwrite:\n", + "if out_folder.exists() and not overwrite:\n", " raise AttributeError(\"Output path exists! 
Exiting\")\n", + "else:\n", + " out_folder.mkdir(parents=True, exist_ok=True)\n", "\n", "fp_name_contr = path_template.format(run, karabo_da_control, 0)\n", - "fp_path_contr = '{}/{}'.format(ped_dir, fp_name_contr)\n", + "fp_path_contr = ped_dir / fp_name_contr\n", "\n", "if sequences[0] == -1:\n", " sequences = None\n", @@ -131,7 +126,7 @@ "metadata": {}, "outputs": [], "source": [ - "def check_memoryCells(file_name, path):\n", + "def check_memory_cells(file_name, path):\n", " with h5py.File(file_name, 'r') as f:\n", " t_stamp = np.array(f[path + '/storageCells/timestamp'])\n", " st_cells = np.array(f[path + '/storageCells/value'])\n", @@ -151,8 +146,9 @@ "outputs": [], "source": [ "# set everything up filewise\n", - "mmf = map_modules_from_folder(in_folder, run, path_template, karabo_da, sequences)\n", - "mapped_files, mod_ids, total_sequences, sequences_qm, _ = mmf\n", + "mapped_files, mod_ids, total_sequences, sequences_qm, _ = map_modules_from_folder(\n", + " in_folder, run, path_template, karabo_da, sequences\n", + ")\n", "\n", "print(f\"Processing a total of {total_sequences} sequence files\")\n", "table = []\n", @@ -169,8 +165,9 @@ " headers=[\"#\", \"module\", \"# module\", \"file\"])))\n", "\n", "# restore the queue\n", - "mmf = map_modules_from_folder(in_folder, run, path_template, karabo_da, sequences)\n", - "mapped_files, mod_ids, total_sequences, sequences_qm, _ = mmf" + "mapped_files, mod_ids, total_sequences, sequences_qm, _ = map_modules_from_folder(\n", + " in_folder, run, path_template, karabo_da, sequences\n", + ")" ] }, { @@ -180,7 +177,7 @@ "outputs": [], "source": [ "if not manual_slow_data:\n", - " with h5py.File(fp_path_contr.format(0), 'r') as f:\n", + " with h5py.File(fp_path_contr, 'r') as f:\n", " run_path = h5path_run.format(karabo_id_control, receiver_control_id)\n", " integration_time = float(f[f'{run_path}/exposureTime/value'][()]*1e6)\n", " bias_voltage = int(np.squeeze(f[f'{run_path}/vHighVoltage/value'])[0])\n", @@ -188,26 +185,26 @@ "\n", "control_path = h5path_cntrl.format(karabo_id_control, receiver_control_id)\n", "try:\n", - " this_run_mcells, sc_start = check_memoryCells(fp_path_contr.format(0), control_path)\n", + " this_run_mcells, sc_start = check_memory_cells(fp_path_contr, control_path)\n", " if this_run_mcells == 1:\n", - " memoryCells = 1\n", + " memory_cells = 1\n", " print(f'Dark runs in single cell mode\\n storage cell start: {sc_start:02d}')\n", " else:\n", - " memoryCells = 16\n", + " memory_cells = 16\n", " print(f'Dark runs in burst mode\\n storage cell start: {sc_start:02d}')\n", "except Exception as e:\n", " if \"Unable to open object\" in str(e):\n", " if mem_cells==0:\n", - " memoryCells = 1\n", + " memory_cells = 1\n", " else:\n", - " memoryCells = mem_cells\n", - " print(f'Set memory cells to {memoryCells} as it is not saved in control information.')\n", + " memory_cells = mem_cells\n", + " print(f'Set memory cells to {memory_cells} as it is not saved in control information.')\n", " else:\n", " print(f\"Error trying to access memory cell from contol information: {e}\")\n", "\n", "print(f\"Integration time is {integration_time} us\")\n", "print(f\"Bias voltage is {bias_voltage} V\")\n", - "print(f\"Number of memory cells is {memoryCells}\")" + "print(f\"Number of memory cells is {memory_cells}\")" ] }, { @@ -216,70 +213,63 @@ "metadata": {}, "outputs": [], "source": [ - "condition = Conditions.Dark.jungfrau(memory_cells=memoryCells,\n", - " bias_voltage=bias_voltage,\n", - " integration_time=integration_time)\n", + "condition = 
Conditions.Dark.jungfrau(\n", + " memory_cells=memory_cells,\n", + " bias_voltage=bias_voltage,\n", + " integration_time=integration_time,\n", + ")\n", "\n", - "def get_constant_for_module(karabo_id, condition, cal_db_interface, creation_time, cal_db_timeout,\n", - " memoryCells, karabo_da):\n", + "def get_constants_for_module(karabo_da: str):\n", " \"\"\" Get calibration constants for given module of Jungfrau\n", - " \n", - " Function contains all includes to be used with ipCluster\n", "\n", - " :param condition: Calibration condition\n", - " :param cal_db_interface: Interface string, e.g. \"tcp://max-exfl016:8015\"\n", - " :param creation_time: Latest time for constant to be created\n", - " :param cal_db_timeout: Timeout for zmq request\n", - " :param: memoryCells: Number of used memory cells\n", - " :param: db_module: Module of Jungfrau, e.g. \"Jungfrau_M035\"\n", - "\n", - " :return: offset_map (offset map), mask (mask of bad pixels), \n", - " gain_map (map of relative gain factors), db_module (name of DB module), \n", + " :return:\n", + " offset_map (offset map),\n", + " mask (mask of bad pixels),\n", + " gain_map (map of relative gain factors),\n", + " karabo_da (name of data aggregator),\n", " when (dictionary: constant - creation time)\n", " \"\"\"\n", - "\n", - " from iCalibrationDB import (ConstantMetaData, Constants, Conditions, Detectors, Versions)\n", - " from cal_tools.tools import get_constant_from_db_and_time\n", - " import numpy as np\n", " \n", " when = {}\n", - "\n", - " #TODO: Remove condition + constant retrieval duplication from notebook \n", - "\n", - " offset_map, when['Offset'] = \\\n", - " get_constant_from_db_and_time(karabo_id, karabo_da,\n", - " Constants.jungfrau.Offset(),\n", - " condition,\n", - " np.zeros((1024, 512, 1, 3)),\n", - " cal_db_interface,\n", - " creation_time=creation_time,\n", - " timeout=cal_db_timeout)\n", - " mask, when['BadPixels'] = \\\n", - " get_constant_from_db_and_time(karabo_id, karabo_da,\n", - " Constants.jungfrau.BadPixelsDark(),\n", - " condition,\n", - " np.zeros((1024, 512, 1, 3)),\n", - " cal_db_interface,\n", - " creation_time=creation_time,\n", - " timeout=cal_db_timeout)\n", - " gain_map, when['Gain'] = \\\n", - " get_constant_from_db_and_time(karabo_id, karabo_da,\n", - " Constants.jungfrau.RelativeGain(),\n", - " condition,\n", - " None,\n", - " cal_db_interface,\n", - " creation_time=creation_time,\n", - " timeout=cal_db_timeout)\n", - "\n", + " retrieval_function = partial(\n", + " get_constant_from_db_and_time,\n", + " karabo_id=karabo_id,\n", + " karabo_da=karabo_da,\n", + " condition=condition,\n", + " cal_db_interface=cal_db_interface,\n", + " creation_time=creation_time,\n", + " timeout=cal_db_timeout,\n", + " )\n", + " offset_map, when[\"Offset\"] = retrieval_function(\n", + " constant=Constants.jungfrau.Offset(), empty_constant=np.zeros((1024, 512, 1, 3))\n", + " )\n", + " mask, when[\"BadPixelsDark\"] = retrieval_function(\n", + " constant=Constants.jungfrau.BadPixelsDark(),\n", + " empty_constant=np.zeros((1024, 512, 1, 3)),\n", + " )\n", + " mask_ff, when[\"BadPixelsFF\"] = retrieval_function(\n", + " constant=Constants.jungfrau.BadPixelsFF(),\n", + " empty_constant=None\n", + " )\n", + " gain_map, when[\"Gain\"] = retrieval_function(\n", + " constant=Constants.jungfrau.RelativeGain(),\n", + " empty_constant=None\n", + " )\n", + "\n", + " # combine dark and flat-field bad pixel masks\n", + " if mask_ff is not None:\n", + " mask |= np.moveaxis(mask_ff, 0, 1)\n", + " \n", " # move from x,y,cell,gain to cell,x,y,gain\n", " offset_map = 
np.squeeze(offset_map)\n", " mask = np.squeeze(mask)\n", - " if memoryCells > 1:\n", + " \n", + " if memory_cells > 1:\n", " offset_map = np.moveaxis(np.moveaxis(offset_map, 0, 2), 0, 2)\n", " mask = np.moveaxis(np.moveaxis(mask, 0, 2), 0, 2)\n", "\n", " if gain_map is not None:\n", - " if memoryCells > 1:\n", + " if memory_cells > 1:\n", " gain_map = np.moveaxis(np.moveaxis(gain_map, 0, 2), 0, 1)\n", " else:\n", " gain_map = np.squeeze(gain_map)\n", @@ -288,24 +278,19 @@ " return offset_map, mask, gain_map, karabo_da, when\n", "\n", "\n", - "# Retrieve Offset, BadPixels and gain constants for a JungFrau module.\n", - "# Run ip Cluster parallelization over modules\n", - "p = partial(get_constant_for_module, karabo_id, condition, cal_db_interface, \n", - " creation_time, cal_db_timeout, memoryCells)\n", - "\n", - "r = view.map_sync(p, karabo_da)\n", - "#r = list(map(p, karabo_da))\n", + "with multiprocessing.Pool() as pool:\n", + " r = pool.map(get_constants_for_module, karabo_da)\n", "\n", "constants = {}\n", - "for rr in r:\n", - " offset_map, mask, gain_map, k_da, when = rr\n", + "for offset_map, mask, gain_map, k_da, when in r:\n", " print(f'Constants for module {k_da}:')\n", " for const in when:\n", - " print(f'{const} injected at {when[const]}')\n", + " print(f' {const} injected at {when[const]}')\n", + " \n", " if gain_map is None:\n", - " print(\"No gain map found\")\n", + " print(\" No gain map found\")\n", " no_relative_gain = True\n", - " \n", + " \n", " constants[k_da] = (offset_map, mask, gain_map)" ] }, @@ -319,11 +304,9 @@ " \"\"\" Copy and sanitize data in `infile` that is not touched by `correctLPD`\n", " \"\"\"\n", "\n", - " if h5base.startswith(\"/\"):\n", - " h5base = h5base[1:]\n", + " h5base = h5base.lstrip(\"/\")\n", " dont_copy = [\"adc\", ]\n", - " dont_copy = [h5base+\"/{}\".format(do)\n", - " for do in dont_copy]\n", + " dont_copy = [f'{h5base}/{dnc}' for dnc in dont_copy]\n", "\n", " def visitor(k, item):\n", " if k not in dont_copy:\n", @@ -344,11 +327,7 @@ "outputs": [], "source": [ "# Correct a chunk of images for offset and gain\n", - "def correct_chunk(offset_map, mask, gain_map, memoryCells, no_relative_gain, inp):\n", - " import numpy as np\n", - " import copy\n", - " import h5py\n", - "\n", + "def correct_chunk(offset_map, mask, gain_map, memory_cells, no_relative_gain, inp):\n", " fim_data = None\n", " gim_data = None\n", " rim_data = None\n", @@ -360,13 +339,13 @@ " g[g==3] = 2\n", "\n", " if copy_sample and ind==0:\n", - " if memoryCells==1:\n", + " if memory_cells==1:\n", " rim_data = np.squeeze(copy.copy(d))\n", " else:\n", " rim_data = np.squeeze(copy.copy(d[:,0,...]))\n", "\n", " # Select memory cells\n", - " if memoryCells>1:\n", + " if memory_cells>1:\n", " m[m>16] = 0\n", " offset_map_cell = offset_map[m,...]\n", " mask_cell = mask[m,...]\n", @@ -380,7 +359,7 @@ "\n", " # Gain correction\n", " if not no_relative_gain:\n", - " if memoryCells>1:\n", + " if memory_cells>1:\n", " gain_map_cell = gain_map[m,...]\n", " else:\n", " gain_map_cell = gain_map\n", @@ -391,7 +370,7 @@ "\n", " # Store sample of data for plotting\n", " if copy_sample and ind==0:\n", - " if memoryCells==1:\n", + " if memory_cells==1:\n", " fim_data = np.squeeze(copy.copy(d))\n", " gim_data = np.squeeze(copy.copy(g))\n", " msk_data = np.squeeze(copy.copy(msk))\n", @@ -403,30 +382,37 @@ " except Exception as e:\n", " err = e\n", "\n", - " return ind, d, msk, rim_data, fim_data, gim_data, msk_data, err\n", - "\n", + " return ind, d, msk, rim_data, fim_data, gim_data, msk_data, 
err" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "fim_data = {}\n", "gim_data = {}\n", "rim_data = {}\n", "msk_data = {}\n", "\n", + "# For each module, chunks will be processed by pool\n", + "pool = multiprocessing.Pool()\n", "# Loop over modules\n", - "for i, key in enumerate(mapped_files):\n", + "for local_karabo_da, mapped_files_module in zip(karabo_da, mapped_files.values()):\n", + " h5path_f = h5path.format(int(local_karabo_da[-2:]))\n", " # Loop over sequences for given module\n", - " for k, f in enumerate(list(mapped_files[key].queue)):\n", - " \n", - " offset_map, mask, gain_map = constants[karabo_da[i]]\n", - " h5path_f = h5path.format(int(karabo_da[i][-2:]))\n", + " for sequence_file_number, sequence_file in enumerate(mapped_files_module.queue):\n", + " sequence_file = Path(sequence_file)\n", + " offset_map, mask, gain_map = constants[local_karabo_da]\n", " \n", - " with h5py.File(f, 'r') as infile:\n", - " \n", + " with h5py.File(sequence_file, 'r') as infile:\n", " # The processed files are saved here in a folder with the run name.\n", - " out_file = \"{}/{}\".format(out_folder, f.split(\"/\")[-1])\n", - " out_file = out_file.replace(\"RAW\", \"CORR\")\n", - " print(f'Process file: {f}, with path {h5path_f}')\n", + " out_filename = out_folder / sequence_file.name.replace(\"RAW\", \"CORR\")\n", + " print(f'Process file: {sequence_file}, with path {h5path_f}')\n", " try:\n", - " with h5py.File(out_file, \"w\") as ofile:\n", - " copy_and_sanitize_non_cal_data(infile, ofile, h5path_f)\n", + " with h5py.File(out_filename, \"w\") as outfile:\n", + " copy_and_sanitize_non_cal_data(infile, outfile, h5path_f)\n", "\n", " oshape = infile[h5path_f+\"/adc\"].shape\n", " print(f'Data shape: {oshape}')\n", @@ -434,18 +420,18 @@ " raise ValueError(f\"No image data: shape {oshape}\")\n", " # Chunk always contains >= 1 complete image\n", " chunk_shape = (chunk_size_idim, 1) + oshape[-2:]\n", - " ddset = ofile.create_dataset(h5path_f+\"/adc\",\n", - " oshape,\n", - " chunks=chunk_shape,\n", - " dtype=np.float32)\n", - "\n", - " mskset = ofile.create_dataset(h5path_f+\"/mask\",\n", - " oshape,\n", - " chunks=chunk_shape,\n", - " dtype=np.uint32,\n", - " compression=\"gzip\", compression_opts=1, shuffle=True)\n", - "\n", - " # Run ip Cluster parallelization over chunks of images\n", + "\n", + " ddset = outfile.create_dataset(h5path_f+\"/adc\",\n", + " oshape,\n", + " chunks=chunk_shape,\n", + " dtype=np.float32)\n", + "\n", + " mskset = outfile.create_dataset(h5path_f+\"/mask\",\n", + " oshape,\n", + " chunks=chunk_shape,\n", + " dtype=np.uint32,\n", + " compression=\"gzip\", compression_opts=1, shuffle=True)\n", + " # Parallelize over chunks of images\n", " inp = []\n", " max_ind = oshape[0]\n", " ind = 0\n", @@ -464,23 +450,21 @@ " else:\n", " m = None\n", " print(f'To process: {d.shape}')\n", - " inp.append((d,g,m, ind, k==0))\n", + " inp.append((d, g, m, ind, sequence_file_number==0))\n", " ind += chunk_size\n", "\n", " print('Preparation time: ', time.time() - ts)\n", " ts = time.time()\n", "\n", " print(f'Run {len(inp)} processes')\n", - " p = partial(correct_chunk, offset_map, mask, gain_map, memoryCells, no_relative_gain)\n", + " p = partial(correct_chunk, offset_map, mask, gain_map, memory_cells, no_relative_gain)\n", "\n", - " r = view.map_sync(p, inp)\n", - " # Used for debugging correct chunk\n", - " #r = list(map(p, inp))\n", + " r = pool.map(p, inp)\n", " \n", - " if k==0:\n", + " if sequence_file_number == 0:\n", " 
(_,_,_,\n", - " rim_data[karabo_da[i]], fim_data[karabo_da[i]],\n", - " gim_data[karabo_da[i]], msk_data[karabo_da[i]], _) = r[0]\n", + " rim_data[local_karabo_da], fim_data[local_karabo_da],\n", + " gim_data[local_karabo_da], msk_data[local_karabo_da], _) = r[0]\n", "\n", " print('Correction time: ', time.time() - ts)\n", " ts = time.time()\n", @@ -495,7 +479,8 @@ "\n", " print('Saving time: ', time.time() - ts)\n", " except Exception as e:\n", - " print(f\"Error: {e}\")" + " print(f\"Error: {e}\")\n", + "pool.close()" ] }, { @@ -513,8 +498,7 @@ " ax.set_ylabel(y_axis)\n", " ax.set_title(title)\n", " cb = fig.colorbar(im)\n", - " cb.set_label(\"Counts\")\n", - " " + " cb.set_label(\"Counts\")" ] }, { @@ -524,8 +508,10 @@ "outputs": [], "source": [ "for mod in rim_data: \n", - " h, ex, ey = np.histogram2d(rim_data[mod].flatten(), gim_data[mod].flatten(),\n", - " bins=[100, 4], range=[[0, 10000], [0,4]])\n", + " h, ex, ey = np.histogram2d(rim_data[mod].flatten(),\n", + " gim_data[mod].flatten(),\n", + " bins=[100, 4],\n", + " range=[[0, 10000], [0, 4]])\n", " do_2d_plot(h, (ex, ey), \"Signal (ADU)\", \"Gain Bit Value\", f'Module {mod}')" ] },
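For reference, the per-pixel arithmetic that `correct_chunk` applies reduces to a few NumPy operations: subtract the offset plane selected by each pixel's gain bit, then divide by the matching relative gain plane. The standalone sketch below illustrates this for the single-memory-cell case; it is an editorial illustration, not part of the patch, and all shapes, names (`n_images`, `rng`), and random test values are assumptions.

import numpy as np

# Toy stand-ins for one Jungfrau module in single-cell mode (assumption:
# after np.squeeze the constants have shape (y, x, gain_stage)).
n_images, ny, nx = 4, 512, 1024
rng = np.random.default_rng(0)

d = rng.normal(2000.0, 50.0, size=(n_images, ny, nx)).astype(np.float32)  # raw ADU
g = rng.choice([0, 1, 3], size=(n_images, ny, nx))  # raw gain bits: 0, 1 or 3

offset_map = rng.normal(1000.0, 10.0, size=(ny, nx, 3)).astype(np.float32)
gain_map = rng.normal(40.0, 1.0, size=(ny, nx, 3)).astype(np.float32)

# The detector encodes its third gain stage as bit value 3, while the
# constants store it at index 2 -- the notebook's `g[g==3] = 2` remapping.
g[g == 3] = 2

# Per pixel, pick the constant plane that matches the pixel's gain stage.
offset = np.choose(g, np.moveaxis(offset_map, -1, 0))
gain = np.choose(g, np.moveaxis(gain_map, -1, 0))

corrected = (d - offset) / gain  # offset-subtracted, gain-corrected signal
print(corrected.shape, corrected.dtype)  # (4, 512, 1024) float32

In burst mode the constants keep a leading memory-cell axis, which is why `correct_chunk` first indexes `offset_map[m, ...]` and `mask[m, ...]` with the per-image storage cell numbers before this per-gain selection.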