diff --git a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
index daa5fbf8a924a098564c4c0558e98ac5a9e4622a..15de49b6f53b194b7a6246312bef7a4bb6810276 100644
--- a/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
+++ b/notebooks/Jungfrau/Jungfrau_dark_analysis_all_gains_burst_mode_NBC.ipynb
@@ -43,7 +43,7 @@
     "badpixel_threshold_sigma = 5. # bad pixels defined by values outside n times this std from median\n",
     "offset_abs_threshold_low = [1000, 10000, 10000] # absolute bad pixel threshold in terms of offset, lower values\n",
     "offset_abs_threshold_high = [8000, 15000, 15000] # absolute bad pixel threshold in terms of offset, upper values\n",
-    "max_trains = 0 # Maximum trains to process darks. Set to 0 to process all available train images.\n",
+    "max_trains = 1000 # Maximum trains to process darks. Set to 0 to process all available train images. 1000 trains provide enough statistics to create the dark constants.\n",
     "min_trains = 1 # Minimum number of trains that should be available to process dark constants. Default 1.\n",
     "manual_slow_data = False # if true, use manually entered bias_voltage and integration_time values\n",
     "time_limits = 0.025 # to find calibration constants later on, the integration time is allowed to vary by 0.5 us\n",
@@ -71,12 +71,10 @@
    },
    "outputs": [],
    "source": [
-    "import glob\n",
     "import gc\n",
     "import os\n",
     "import warnings\n",
     "from logging import warning\n",
-    "from pathlib import Path\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
     "import matplotlib\n",
@@ -95,7 +93,6 @@
     "from XFELDetAna.plotting.heatmap import heatmapPlot\n",
     "from XFELDetAna.plotting.histogram import histPlot\n",
     "from cal_tools import jungfraulib, step_timing\n",
-    "from cal_tools.ana_tools import save_dict_to_hdf5\n",
     "from cal_tools.enums import BadPixels, JungfrauGainMode\n",
     "from cal_tools.tools import (\n",
     "    get_dir_creation_date,\n",
@@ -313,31 +310,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "context = psh.context.ThreadContext(num_workers=multiprocessing.cpu_count())"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def calculate_parallel_threads(n_trains, memory_cells):\n",
-    "    \"\"\"Calculate the maximum number of parallel threads to use.\n",
-    "    If enough memory is free, each cell constant can be computed in parallel.\n",
-    "    Otherwise the parallel threads are reduced based on estimated memory consumption.\n",
-    "    If the n_trains exceed the free memory of one processes,\n",
-    "    number of trains to process will be reduced.\n",
-    "    \"\"\"\n",
-    "    reduced_trains = n_trains\n",
-    "    available_memory = (psutil.virtual_memory().available >> 30) - (16 * n_trains * (1024 * 512 * 3 + 1) // 1e9)\n",
-    "\n",
-    "    parallel_threads = available_memory // ((1024 * 512 * 5 * n_trains) // 1e9)\n",
-    "    if parallel_threads < 1:\n",
-    "        reduced_trains = (available_memory // ((1024 * 512 * 4 / 1e9))) - 4\n",
-    "        warning(f\"Reducing the processed trains from {n_trains} to {reduced_trains} to fit the free memory.\")\n",
-    "\n",
-    "    return max(min(memory_cells, int(parallel_threads)), 1), int(reduced_trains)"
+    "context = psh.context.ThreadContext(num_workers=memory_cells)"
    ]
   },
   {
@@ -370,7 +343,8 @@
     "\n",
     "    print(f\"\\n- Instrument data path for {mod} is {instrument_src}.\")\n",
     "\n",
-    "    offset_map[mod] = context.alloc(shape=(sensor_size+(memory_cells, 3)), fill=0, dtype=np.float32)\n",
+    "    offset_map[mod] = context.alloc(\n",
+    "        shape=(sensor_size+(memory_cells, 3)), fill=0, dtype=np.float32)\n",
     "    noise_map[mod] = context.alloc(like=offset_map[mod], fill=0)\n",
     "    bad_pixels_map[mod] = context.alloc(like=offset_map[mod], dtype=np.uint32, fill=0)\n",
     "\n",
@@ -422,8 +396,6 @@
     "    f\"Less than {min_trains} trains are available in RAW data.\"\n",
     "    \" Not enough data to process darks.\")\n",
     "\n",
-    "    parallel_threads, n_trains = calculate_parallel_threads(n_trains, memory_cells)\n",
-    "\n",
     "    # Select only requested number of images to process darks.\n",
     "    instr_dc = instr_dc.select_trains(np.s_[:n_trains])\n",
     "    images = np.transpose(\n",
@@ -447,12 +419,10 @@
     "    acelltable[1:] = 255\n",
     "\n",
     "    # Calculate offset and noise maps\n",
-    "    context = psh.context.ThreadContext(num_workers=parallel_threads)\n",
     "    context.map(process_cell, range(memory_cells))\n",
     "    del images\n",
     "    del acelltable\n",
     "    del gain_vals\n",
-    "    gc.collect();\n",
     "    step_timer.done_step(f'Creating Offset and noise constants for a module.')"
    ]
   },
diff --git a/setup.py b/setup.py
index 252d1d5aefc86b525fa233c4090bff16f7198963..192a5d57e1cd7176d9156bafe5b490077488d7d4 100644
--- a/setup.py
+++ b/setup.py
@@ -91,7 +91,6 @@ install_requires = [
     "pasha==0.1.1",
     "prettytable==0.7.2",
     "princess==0.5",
-    "psutil==5.9.4",
     "pypandoc==1.4",
     "python-dateutil==2.8.2",
     "pyyaml==5.3",
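
Note on the threading change: the hunks above drop the psutil-based
calculate_parallel_threads() heuristic in favour of a fixed layout — one
pasha worker thread per memory cell — which is presumably safe once the
default max_trains is capped at 1000. Below is a minimal, self-contained
sketch of the resulting pattern; the array sizes, the synthetic data, and
the process_cell statistics are illustrative stand-ins rather than the
notebook's actual logic, while the ThreadContext, alloc and map calls
mirror the notebook cells touched by this diff.

    import numpy as np
    import pasha as psh

    memory_cells = 16                  # JUNGFRAU burst mode: up to 16 storage cells
    n_trains, rows, cols = 50, 64, 64  # tiny stand-in dimensions

    # Synthetic dark data, shape (cells, trains, y, x).
    images = np.random.default_rng(0).normal(
        1000., 10., (memory_cells, n_trains, rows, cols)).astype(np.float32)

    # One worker thread per memory cell, as in the modified notebook cell.
    context = psh.context.ThreadContext(num_workers=memory_cells)
    offset_map = context.alloc(shape=(rows, cols, memory_cells), fill=0, dtype=np.float32)
    noise_map = context.alloc(like=offset_map, fill=0)

    def process_cell(worker_id, array_index, cell):
        # Offset constant = mean over trains; noise constant = std over trains.
        offset_map[..., cell] = np.mean(images[cell], axis=0)
        noise_map[..., cell] = np.std(images[cell], axis=0)

    context.map(process_cell, range(memory_cells))

Since context.map() iterates over exactly range(memory_cells), pinning
num_workers to memory_cells gives every cell its own thread with none
oversubscribed; the removed heuristic appears to have mattered only while
the train count was unbounded.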