diff --git a/notebooks/REMI/REMI_Digitize_and_Transform.ipynb b/notebooks/REMI/REMI_Digitize_and_Transform.ipynb index 412de0670bb17fbdc67c5392608bf67d0607eb43..5dae80017da0519cea7136740ac5334546911389 100644 --- a/notebooks/REMI/REMI_Digitize_and_Transform.ipynb +++ b/notebooks/REMI/REMI_Digitize_and_Transform.ipynb @@ -13,12 +13,6 @@ "\n", "calib_config_path = '/gpfs/exfel/exp/SQS/202101/p002535/usr/config_board2+4.yaml' # Path to correction and transform configuration\n", "\n", - "# These parameters are required by xfel-calibrate but ignored in this notebook.\n", - "cycle = '' # Proposal cycle, currently not used.\n", - "cal_db_timeout = 0 # Calibration DB timeout, currently not used.\n", - "cal_db_interface = 'foo' # Calibration DB interface, currently not used.\n", - "karabo_da = 'bar' # Karabo data aggregator name, currently not used\n", - "\n", "# Output parameters.\n", "karabo_id = 'SQS_REMI_DLD6' # Karabo device ID root for virtual output device.\n", "proposal = '' # Proposal, leave empty for auto detection based on in_folder\n", @@ -26,11 +20,6 @@ "out_seq_len = 5000 # Number of trains per sequence file in output.\n", "det_device_id = '{karabo_id}/DET/{det_name}' # Karabo device ID for virtual output device.\n", "det_output_key = 'output' # Pipeline name for fast data output.\n", - "save_raw_triggers = True # Whether to save trigger position in files.\n", - "save_raw_edges = True # Whether to save digitized edge positions in files.\n", - "save_raw_amplitudes = True # Whether to save analog pulse amplitudes in files.\n", - "save_rec_signals = True # Whether to save reconstructed signals (u1-w2, mcp) in files.\n", - "save_rec_hits = True # Whether to save reoncstructed hits (x,y,t,m) in files.\n", "chunks_triggers = [500] # HDF chunk size for triggers.\n", "chunks_edges = [500, 7, 50] # HDF chunk size for edges.\n", "chunks_amplitudes = [500, 7, 50] # HDF chunk size for amplitudes.\n", @@ -65,7 +54,19 @@ "mp_find_triggers = 0.5 # Parallelization for finding triggers.\n", "mp_find_edges = 0.5 # Parallelization for digitizing analog signal.\n", "mt_avg_trace = 2 # Parallelization for trace averaging.\n", - "mp_rec_hits = 1.0 # Parallelization for hit reconstruction." + "mp_rec_hits = 1.0 # Parallelization for hit reconstruction.\n", + "\n", + "# DEPRECATED AND IGNORED\n", + "# Left for compatibility with webservice or legacy configuration.\n", + "cycle = '' # Proposal cycle, passed by webservice but not used.\n", + "karabo_da = 'bar' # Karabo data aggregator name, passed by webservice but not used.\n", + "cal_db_timeout = 0 # Calibration DB timeout, passed by webservice but not used.\n", + "cal_db_interface = 'foo' # Calibration DB interface, passed by webservice but not used.\n", + "save_raw_triggers = True # Whether to save trigger positions in files, ignored and always enabled.\n", + "save_raw_edges = True # Whether to save digitized edge positions in files, ignored and always enabled.\n", + "save_raw_amplitudes = True # Whether to save analog pulse amplitudes in files, ignored and always enabled.\n", + "save_rec_signals = True # Whether to save reconstructed signals (u1-w2, mcp) in files, ignored and always enabled.\n", + "save_rec_hits = True # Whether to save reconstructed hits (x,y,t,m) in files, ignored and always enabled." 
] }, { @@ -82,8 +83,12 @@ "import numpy as np\n", "import matplotlib.pyplot as plt\n", "from matplotlib.colors import LogNorm\n", + "from matplotlib.patches import Circle\n", "from threadpoolctl import threadpool_limits\n", "\n", + "import tabulate\n", + "from IPython.display import Latex, Markdown, display\n", + "\n", "import h5py\n", "\n", "import pasha as psh\n", @@ -132,9 +137,26 @@ "\n", "remi = Analysis(calib_config_path, use_hex=not quad_anode)\n", "\n", + "# Collect all required sources and keys.\n", + "sourcekeys = set()\n", + "for det_name in remi['detector'].keys():\n", + "    sourcekeys |= remi.get_detector_sourcekeys(det_name)\n", + "    \n", + "if not reconstruct_ppt:\n", + "    sourcekeys.add((ppt_source, 'data.bunchPatternTable'))\n", + "\n", "with timing('open_run'):\n", - "    dc = remi.prepare_dc(RunDirectory(Path(in_folder) / f'r{run:04d}', inc_suspect_trains=True),\n", - "                         require_ppt=not reconstruct_ppt)" + "    # Initial opening of input data.\n", + "    base_dc = RunDirectory(Path(in_folder) / f'r{run:04d}', inc_suspect_trains=True)\n", + "    \n", + "with timing('select_data'):\n", + "    # Filter down to those trains with data for all required sources.\n", + "    filter_run = base_dc.select(sourcekeys, require_all=True)\n", + "\n", + "# Re-select the entire data collection to the trains with data.\n", + "dc = base_dc.select_trains(by_id[filter_run.train_ids])\n", + "base_dc = None\n", + "filter_run = None" ] }, { @@ -144,21 +166,110 @@ "# Transformation parameters" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Additional parameters are read from the user-side configuration file for analog channels and detector settings. Parameters that are deprecated and ignored, but still present in the file, are excluded." ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "def print_leaf(leaf, indent=0):\n", + "def print_leaf(leaf, indent=0, ignored_keys=frozenset()):\n", "    for key, value in leaf.items():\n", + "        if key in ignored_keys:\n", + "            continue\n", + "        \n", "        if isinstance(value, dict):\n", "            print(indent * 4 * ' ' + key)\n", - "            print_leaf(value, indent=indent+1)\n", + "            print_leaf(value, indent=indent+1, ignored_keys=ignored_keys)\n", "        else:\n", "            print(indent * 4 * ' ' + f'{key}: {value}')\n", - "    \n", - "print_leaf(remi.tree)" + "\n", + "print(Path(calib_config_path).resolve())\n", + "print_leaf(remi.tree, ignored_keys={'instrument', 'trigger'})" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "discr_table = []\n", + "\n", + "if quad_anode:\n", + "    signals = ['u1', 'u2', 'v1', 'v2', 'mcp']\n", + "    wire_angles = [np.pi*(3/4), np.pi*(1/4)]\n", + "else:\n", + "    signals = ['u1', 'u2', 'v1', 'v2', 'w1', 'w2', 'mcp']\n", + "    wire_angles = [np.pi*(3/4), np.pi*(3/4+1/3), np.pi*(3/4+2/3)]\n", + "    \n", + "N = 15\n", + "shifts = np.linspace(-0.4, 0.4, N)\n", + "\n", + "for det_name, cur_det in remi['detector'].items():\n", + "    fig = plt.figure(num=f'wiring_{det_name}', figsize=(9, 5))\n", + "    fig.text(0.5, 1.0, det_name, ha='center', va='top', size='xx-large')\n", + "    ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])\n", + "    ax.set_axis_off()\n", + "\n", + "    ax.add_patch(Circle((0,0), 1, ec='black', fc='none', lw=2))\n", + "    ax.set_xlim(-1.5*(9/5), 1.5*(9/5))\n", + "    ax.set_ylim(-1.5, 1.5)\n", + "\n", + "    _, params = remi.get_discriminator(cur_det['channels'])\n", + "    discr_header = params[0].keys()\n", + "\n", + "    for channel_idx in range(len(signals)):\n", + "        index = 
cur_det['indices'].index(channel_idx)\n", + " discr_table.append((det_name, signals[channel_idx],\n", + " cur_det['channels'][index],\n", + " remi['digitizer']['discriminator'],\n", + " *params[index].values()))\n", + "\n", + " for j, start_angle in enumerate(wire_angles):\n", + " x1 = np.cos(start_angle+np.pi/4)\n", + " x2 = np.cos(start_angle+5*np.pi/4)\n", + "\n", + " y1 = np.sin(start_angle+np.pi/4)\n", + " y2 = np.sin(start_angle+5*np.pi/4)\n", + "\n", + " channel = cur_det['channels'][cur_det['indices'].index(2*j)]\n", + " ax.text(x1*1.2, y1*1.2, f'{signals[2*j]}\\n{channel}',\n", + " c=f'C{j}', fontsize='xx-large', va='center', ha='center')\n", + "\n", + " channel = cur_det['channels'][cur_det['indices'].index(2*j+1)]\n", + " ax.text(x2*1.2, y2*1.2, f'{signals[2*j+1]}\\n{channel}',\n", + " c=f'C{j}', fontsize='xx-large', va='center', ha='center')\n", + "\n", + " for k, shift in enumerate(shifts):\n", + " x1 = np.cos(start_angle+np.pi/4+shifts[k])\n", + " x2 = np.cos(start_angle+5*np.pi/4+shifts[N-k-1])\n", + "\n", + " y1 = np.sin(start_angle+np.pi/4+shifts[k])\n", + " y2 = np.sin(start_angle+5*np.pi/4+shifts[N-k-1])\n", + "\n", + " ax.plot([x1, x2], [y1, y2], c=f'C{j}')\n", + "\n", + " mcp_angle = np.pi/6\n", + " channel = cur_det['channels'][cur_det['indices'].index(6)]\n", + " ax.text(1.4*np.cos(mcp_angle), 1.2*np.sin(mcp_angle), f'mcp\\n{channel}',\n", + " c='k', fontsize='xx-large', va='center', ha='center')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "display(Latex(tabulate.tabulate(\n", + " discr_table, tablefmt='latex', headers=['', '', '', 'discriminator', *discr_header])))" ] }, { @@ -245,7 +356,7 @@ " \n", " ppt_data = FakeKeyDataFunctor(fake_ppt, dc.train_ids)\n", " \n", - " fig, ax = plt.subplots(num=99, figsize=(9, 6), clear=True, ncols=1, nrows=1)\n", + " fig, ax = plt.subplots(num='reconstructed_ppt_triggers', figsize=(9, 6), clear=True, ncols=1, nrows=1)\n", "\n", " ax.set_title('Edge trigger signal')\n", " ax.plot(trigger_trace, lw=1, label=f'Mean {trigger_edge_channel} trace')\n", @@ -257,7 +368,8 @@ " ax.legend()\n", " \n", "else:\n", - " ppt_data = dc[ppt_source, 'data.bunchPatternTable']" + " ppt_data = dc[ppt_source, 'data.bunchPatternTable']\n", + " print(f'Pulse pattern entries for {(ppt_data.data_counts() > 0).sum()} trains')" ] }, { @@ -320,6 +432,7 @@ " \n", " if trailing_trigger:\n", " # Add a single count to every train for trailing trigger.\n", + " warning('Trailing trigger active, all pulse counts are one higher than expected')\n", " pulse_counts += 1\n", "\n", " # Compute offsets based on pulse counts.\n", @@ -338,7 +451,7 @@ }, "outputs": [], "source": [ - "fig, ax = plt.subplots(num=1, ncols=1, nrows=1, figsize=(9, 4), clear=True)\n", + "fig, ax = plt.subplots(num='pulse_counts', ncols=1, nrows=1, figsize=(9, 4), clear=True)\n", "\n", "ax.set_title('Pulse count')\n", "ax.plot(dc.train_ids, pulse_counts, lw=1)\n", @@ -353,9 +466,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Find triggers\n", - "\n", - "The trigger defines the boundary of a pulse on the digitizer trace, which is stored per train." 
+ "### Find triggers" ] }, { @@ -466,7 +577,7 @@ "metadata": {}, "outputs": [], "source": [ - "fig, (lx, rx) = plt.subplots(num=2, ncols=2, nrows=1, figsize=(9, 4), clear=True,\n", + "fig, (lx, rx) = plt.subplots(num='trigger_positions', ncols=2, nrows=1, figsize=(9, 4), clear=True,\n", " gridspec_kw=dict(top=0.75))\n", "\n", "# Display ~400 pulses or 10 trains, whatever is lower\n", @@ -511,6 +622,13 @@ "pass" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The trigger defines the boundary of each pulse on the digitizer trace acquired by train. The starting position in samples of each found trigger is shown for the first few trains in detail on the left and all trains on the right." + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -538,7 +656,7 @@ "\n", "det_data = {}\n", "\n", - "for i, (det_name, det) in enumerate(remi['detector'].items()):\n", + "for det_name, det in remi['detector'].items():\n", " det_sourcekeys = remi.get_detector_sourcekeys(det_name)\n", " det_get_traces = remi.get_traces_getter(det_name)\n", " trace_len = dc[next(iter(det_sourcekeys))].entry_shape[0]\n", @@ -556,7 +674,8 @@ " source_name = remi['digitizer']['source']\n", " bl_start, bl_stop, _ = remi.get_baseline_limits(trace_len)\n", " bl_sym = remi['digitizer']['baseline_symmetry']\n", - " time_cal = remi.get_time_calibration()\n", + " \n", + " time_cal = 1e9 / (2 * remi['digitizer']['clock_factor'] * (1.3e9 / 288))\n", " \n", " traces_corr = np.empty((7, trace_len), dtype=np.float64)\n", " baselines = np.empty(bl_sym, dtype=np.float64)\n", @@ -597,7 +716,7 @@ " if not np.isfinite(edges).any():\n", " warning(f'No edges found for {det_name}')\n", " \n", - " fig, (ux, bx) = plt.subplots(num=110+i, ncols=1, nrows=2, figsize=(9.5, 8), clear=True,\n", + " fig, (ux, bx) = plt.subplots(num=f'digitize_result_{det_name}', ncols=1, nrows=2, figsize=(9.5, 8), clear=True,\n", " gridspec_kw=dict(left=0.1, right=0.98, top=0.98, bottom=0.1, hspace=0.25))\n", " \n", " fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n", @@ -606,7 +725,7 @@ "\n", " for edge_idx, edge_name in enumerate(['u1', 'u2', 'v1', 'v2', 'w1', 'w2', 'mcp']):\n", " n, _, _ = ux.hist(finite_flattened_slice(amplitudes, np.s_[:, edge_idx, :]),\n", - " bins=1000, range=(0, 2048), histtype='step', lw=1,\n", + " bins=1000, range=(0, 4096), histtype='step', lw=1,\n", " color=f'C{edge_idx}' if edge_idx < 6 else 'k', label=edge_name)\n", " max_num = max(max_num, n.max())\n", " \n", @@ -643,6 +762,17 @@ " }" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The analog signal is digitized into discrete edges using a fast timing discriminator. The result of this operation is available in files in the `raw.triggers` dataset.\n", + "\n", + "The pulse height distribution is an integral view about the chosen digitization thresholds. For more detail, please refer to the spectral pulse height distributions further below.\n", + "\n", + "The fractional edge distribution visualizes the interpolated component of edge positions, i.e. between discrete digitizer samples. This should in general be flat, in particular a convex shape indicates poor interpolation due to too fast rise times." 
+ ] + }, { "cell_type": "markdown", "metadata": {}, @@ -656,8 +786,8 @@ "metadata": {}, "outputs": [], "source": [ - "for i, det_name in enumerate(remi['detector'].keys()):\n", - " fig, axs = plt.subplots(num=10+i, nrows=7, figsize=(9.5, 8), clear=True,\n", + "for det_name in remi['detector'].keys():\n", + " fig, axs = plt.subplots(num=f'global_average_{det_name}', nrows=7, figsize=(9.5, 8), clear=True,\n", " gridspec_kw=dict(left=0.1, right=0.98, top=0.98, bottom=0.1))\n", " fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n", "\n", @@ -683,10 +813,10 @@ "metadata": {}, "outputs": [], "source": [ - "for i, det_name in enumerate(remi['detector'].keys()):\n", + "for det_name in remi['detector'].keys():\n", " edges = det_data[det_name]['edges']\n", " \n", - " fig = plt.figure(num=100+i, figsize=(9.5, 8))\n", + " fig = plt.figure(num=f'edge_samples_{det_name}', figsize=(9.5, 8))\n", " grid = fig.add_gridspec(ncols=2, nrows=4, left=0.1, right=0.98, top=0.98, bottom=0.1)\n", "\n", " fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n", @@ -747,10 +877,11 @@ }, "outputs": [], "source": [ - "for i, det_name in enumerate(remi['detector'].keys()):\n", - " fig = plt.figure(num=20+i, figsize=(9.5, 6))\n", + "for det_name in remi['detector'].keys():\n", + " fig = plt.figure(num=f'digitized_spectra_{det_name}', figsize=(9.5, 6))\n", " \n", " edges = det_data[det_name]['edges']\n", + " amplitudes = det_data[det_name]['amplitudes']\n", " \n", " min_edge = np.nanmin(edges)\n", " max_edge = np.nanmax(edges)\n", @@ -810,6 +941,63 @@ "pass" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Spectral pulse height distributions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for det_name in remi['detector'].keys():\n", + " fig = plt.figure(num=f'spectral_pulse_heights_{det_name}', figsize=(9.5, 12.0))\n", + " grid = fig.add_gridspec(ncols=2, nrows=4, left=0.08, right=0.98, top=0.95, hspace=0.3)\n", + " fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n", + " \n", + " edges = det_data[det_name]['edges']\n", + " amplitudes = det_data[det_name]['amplitudes']\n", + " \n", + " min_edge = np.nanmin(edges)\n", + " max_edge = np.nanmax(edges)\n", + " \n", + " max_amplitude = np.nanmax(amplitudes)\n", + "\n", + " for edge_idx, edge_name in enumerate(['u1', 'u2', 'v1', 'v2', 'w1', 'w2', 'mcp']):\n", + " if edge_idx < 6:\n", + " row = 1 + edge_idx // 2\n", + " col = edge_idx % 2\n", + " tof_bins = int((max_edge - min_edge) // 20)\n", + " else:\n", + " row = 0\n", + " col = np.s_[:]\n", + " tof_bins = int((max_edge - min_edge) // 10)\n", + "\n", + " ax = fig.add_subplot(grid[row, col])\n", + " ax.set_title(f'Spectral pulse amplitudes: {edge_name}')\n", + "\n", + " flat_edges = finite_flattened_slice(edges, np.s_[:, edge_idx, :])\n", + " flat_amplitudes = finite_flattened_slice(amplitudes, np.s_[:, edge_idx, :])\n", + " ax.hist2d(flat_edges, flat_amplitudes,\n", + " bins=[tof_bins, 512], norm=LogNorm(),\n", + " range=[[min_edge, max_edge], [0, max_amplitude]])\n", + " \n", + " if edge_idx == 6:\n", + " ax.set_ylabel('Pulse height')\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A more detailed view into the distribution of pulse heights as a function of TOF, e.g. 
to indicate whether the spectrometer transmission may depend on the kinetic energy and/or (in the case of ions) mass." ] + }, { "cell_type": "markdown", "metadata": {}, @@ -825,7 +1013,7 @@ }, "outputs": [], "source": [ - "for i, det_name in enumerate(remi['detector'].keys()):\n", + "for det_name in remi['detector'].keys():\n", "    edges = det_data[det_name]['edges']\n", "    \n", "    sort = remi.get_dld_sorter(det_name)\n", @@ -841,7 +1029,8 @@ "    \n", "    signals, sums = remi.get_signals_and_sums(edges, indices=sort.channel_indices, sum_shifts=sum_shifts,\n", "                                              mask=is_valid)\n", - "    fig = plot_detector_diagnostics(signals=signals, sums=sums, fig_num=30+i, im_scale=1.5,\n", + "    fig = plot_detector_diagnostics(signals=signals, sums=sums,\n", + "                                    fig_num=f'diagnostics_{det_name}', im_scale=1.5,\n", "                                    sum_range=max(sort.uncorrected_time_sum_half_widths),\n", "                                    sorter=sort)\n", "    fig.text(0.02, 0.98, det_name.upper() + ' before corrections', rotation=90, ha='left', va='top', size='x-large')\n", @@ -852,13 +1041,27 @@ "    sums = np.full((n_masked, 3), np.nan, dtype=np.float64)\n", "\n", "    sort.correct(edges[is_valid], signals, sums)\n", - "    fig = plot_detector_diagnostics(signals=signals, sums=sums, fig_num=40+i, im_scale=1.5,\n", + "    fig = plot_detector_diagnostics(signals=signals, sums=sums,\n", + "                                    fig_num=f'corr_diagnostics_{det_name}', im_scale=1.5,\n", "                                    sum_range=max(sort.uncorrected_time_sum_half_widths),\n", "                                    sorter=sort)\n", "    fig.text(0.02, 0.98, det_name.upper() + ' after corrections', rotation=90, ha='left', va='top', size='x-large')\n", "pass" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Overview of initial detector signal correlations before actual hit reconstruction takes place. Only the first edge occurring on each channel for each trigger is included, and only if their times are compatible with a rough time sum window.\n", + "\n", + "* The top row contains the spectrum of time differences on each wire in temporal coordinates on the left and spatial coordinates on the right (according to configured scale factors).\n", + "* The middle row depicts time sums, first integrated and then as a function of time difference. The time sum should generally be roughly constant; a spectrum-like appearance indicates that wire ends have been swapped entirely.\n", + "* [HEX-only] The bottom row shows the detector image for each combination of wires based on this limited dataset. There should be no deformations or rotations in any of the wire pairs; otherwise channels are likely misassigned.\n", + "\n", + "This plot appears twice if signal-level corrections for time sum or position are enabled." ] + }, { "cell_type": "markdown", "metadata": {}, @@ -902,7 +1105,7 @@ "metadata": {}, "outputs": [], "source": [ - "fig, ax = plt.subplots(num=50+i, figsize=(9.5, 4), ncols=1, clear=True,\n", + "fig, ax = plt.subplots(num='hit_count_per_trigger', figsize=(9.5, 4), ncols=1, clear=True,\n", "                       gridspec_kw=dict(top=0.92, right=0.98, left=0.05, bottom=0.12))\n", " \n", "max_num_hits = 0.0\n", @@ -934,7 +1137,7 @@ "\n", "* `0`: All six anode signals and the corresponding MCP signal were found.\n", "* `4`: One signal on layer `u` is missing, all other signals for this event were found.\n", - "* `18`: Only one anode signal on each layer was found and the MCP signal is missing. There is no way to check whether this combination of signals is actually valid.\n", + "* `18`: Only one anode signal on each layer was found and the MCP signal is missing. 
There is no way to check whether this combination of signals is actually valid based on the detector data alone.\n", "\n", "| Method | `u+v+w +mcp` |\n", "| - | - |\n", @@ -971,10 +1174,10 @@ }, "outputs": [], "source": [ - "for i, det_name in enumerate(remi['detector'].keys()):\n", + "for det_name in remi['detector'].keys():\n", " hits = det_data[det_name]['hits']\n", " \n", - " fig, ax = plt.subplots(num=60+i, figsize=(9.5, 5), ncols=1, clear=True,\n", + " fig, ax = plt.subplots(num=f'reconstruction_methods_{det_name}', figsize=(9.5, 5), ncols=1, clear=True,\n", " gridspec_kw=dict(left=0.08, right=0.91, top=0.8))\n", " \n", " fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n", @@ -1050,34 +1253,41 @@ }, "outputs": [], "source": [ - "for i, det_name in enumerate(remi['detector'].keys()):\n", + "for det_name in remi['detector'].keys():\n", " flat_hits = det_data[det_name]['hits'].reshape(-1)\n", " flat_hits = flat_hits[np.isfinite(flat_hits[:]['x'])]\n", " flat_hits = flat_hits[flat_hits['m'] <= 10]\n", - " \n", - " fig = plt.figure(num=70+i, figsize=(9, 13.5))\n", + "\n", + " fig = plt.figure(num=f'detector_results_{det_name}', figsize=(9, 10.5))\n", " \n", " fig.text(0.02, 0.98, det_name.upper(), rotation=90, ha='left', va='top', size='x-large')\n", " fig.text(0.02, 0.02, det_name.upper(), rotation=90, ha='left', va='bottom', size='x-large')\n", " \n", - " imp = fig.add_axes([0.1 + 0.25/2, 0.56, 0.6, 0.4])\n", - " txp = fig.add_axes([0.1, 0.28, 0.85, 0.22])\n", - " typ = fig.add_axes([0.1, 0.04, 0.85, 0.22])\n", + " imp = fig.add_axes([0.1 + 0.25/2, 0.56, 0.5, 0.45])\n", + " txp = fig.add_axes([0.1, 0.27, 0.85, 0.23])\n", + " typ = fig.add_axes([0.1, 0.02, 0.85, 0.23])\n", " \n", " if flat_hits.size == 0:\n", " warning(f'No hits found for {det_name}')\n", " continue\n", " \n", - " im_radius = remi['detector'][det_name]['mcp_radius']*1.1\n", + " mcp_radius = remi['detector'][det_name]['mcp_radius']\n", + " im_radius = mcp_radius * 1.1\n", " \n", " imp.hist2d(flat_hits['x'], flat_hits['y'], bins=(256, 256),\n", " range=[[-im_radius, im_radius], [-im_radius, im_radius]], norm=LogNorm())\n", + " imp.add_patch(Circle(\n", + " (0, 0), mcp_radius,\n", + " linestyle='dashed', edgecolor='red', facecolor='none', linewidth=1))\n", " imp.xaxis.set_label_position('top')\n", " imp.set_xlabel('X / mm')\n", " imp.set_ylabel('Y / mm')\n", " imp.tick_params(right=True, labelright=True, top=True, labeltop=True)\n", " imp.grid()\n", " \n", + " text_pos = 1.05*mcp_radius*np.sin(np.pi/4)\n", + " imp.text(text_pos, text_pos, 'MCP', c='red', ha='left', va='bottom')\n", + " \n", " min_tof = flat_hits['t'].min()\n", " max_tof = flat_hits['t'].max()\n", " \n", @@ -1089,7 +1299,7 @@ "\n", " for ax, dim_label in zip([txp, typ], ['x', 'y']):\n", " ax.hist2d(flat_hits['t'], flat_hits[dim_label], bins=(num_tof_bins, 256),\n", - " range=[[min_tof, max_tof], [-im_radius, im_radius]], norm=LogNorm())\n", + " range=[[min_tof, max_tof], [-im_radius, im_radius]], norm=LogNorm())\n", " ax.set_ylabel(f'{dim_label.upper()} / mm')\n", " \n", " typ.set_xlabel('Time-of-flight / ns')\n", @@ -1121,17 +1331,10 @@ "\n", "control_sources = [det_device_id.format(karabo_id=karabo_id, det_name=det_name.upper())\n", " for det_name in remi['detector']]\n", - "\n", - "channels = []\n", - "if save_raw_triggers or save_raw_edges:\n", - " channels.append('raw')\n", - "if save_rec_signals or save_rec_hits:\n", - " channels.append('rec')\n", - " \n", "instrument_channels = [\n", " 
f'{device_id}:{det_output_key}/{channel}'\n", " for device_id in control_sources\n", - " for channel in channels\n", + " for channel in ['raw', 'rec']\n", "]" ] }, @@ -1171,38 +1374,33 @@ " \n", " cur_data = det_data[det_name]\n", " \n", - " if save_raw_triggers:\n", - " cur_fast_data.create_key('raw.triggers', triggers[pulse_mask],\n", - " maxshape=(None,) + triggers.shape[1:],\n", - " chunks=tuple(chunks_triggers), **dataset_kwargs)\n", + " cur_fast_data.create_key('raw.triggers', triggers[pulse_mask],\n", + " maxshape=(None,) + triggers.shape[1:],\n", + " chunks=tuple(chunks_triggers), **dataset_kwargs)\n", " \n", - " if save_raw_edges:\n", - " cur_fast_data.create_key('raw.edges', cur_data['edges'][pulse_mask],\n", - " maxshape=(None,) + cur_data['edges'].shape[1:],\n", - " chunks=tuple(chunks_edges if chunks_edges[-1] <= cur_max_hits\n", - " else chunks_edges[:-1] + [cur_max_hits]),\n", - " **dataset_kwargs)\n", + " cur_fast_data.create_key('raw.edges', cur_data['edges'][pulse_mask],\n", + " maxshape=(None,) + cur_data['edges'].shape[1:],\n", + " chunks=tuple(chunks_edges if chunks_edges[-1] <= cur_max_hits\n", + " else chunks_edges[:-1] + [cur_max_hits]),\n", + " **dataset_kwargs)\n", " \n", - " if save_raw_amplitudes:\n", - " cur_fast_data.create_key('raw.amplitudes', cur_data['amplitudes'][pulse_mask],\n", - " maxshape=(None,) + cur_data['amplitudes'].shape[1:],\n", - " chunks=tuple(chunks_amplitudes if chunks_amplitudes[-1] <= cur_max_hits\n", - " else chunks_amplitudes[:-1] + [cur_max_hits]),\n", - " **dataset_kwargs)\n", + " cur_fast_data.create_key('raw.amplitudes', cur_data['amplitudes'][pulse_mask],\n", + " maxshape=(None,) + cur_data['amplitudes'].shape[1:],\n", + " chunks=tuple(chunks_amplitudes if chunks_amplitudes[-1] <= cur_max_hits\n", + " else chunks_amplitudes[:-1] + [cur_max_hits]),\n", + " **dataset_kwargs)\n", " \n", - " if save_rec_signals:\n", - " cur_fast_data.create_key('rec.signals', cur_data['signals'][pulse_mask],\n", - " maxshape=(None,) + cur_data['signals'].shape[1:],\n", - " chunks=tuple(chunks_signals if chunks_signals[-1] <= cur_max_hits\n", - " else chunks_signals[:-1] + [cur_max_hits]),\n", - " **dataset_kwargs)\n", + " cur_fast_data.create_key('rec.signals', cur_data['signals'][pulse_mask],\n", + " maxshape=(None,) + cur_data['signals'].shape[1:],\n", + " chunks=tuple(chunks_signals if chunks_signals[-1] <= cur_max_hits\n", + " else chunks_signals[:-1] + [cur_max_hits]),\n", + " **dataset_kwargs)\n", " \n", - " if save_rec_hits:\n", - " cur_fast_data.create_key('rec.hits', cur_data['hits'][pulse_mask],\n", - " maxshape=(None,) + hits.shape[1:],\n", - " chunks=tuple(chunks_hits if chunks_hits[-1] <= cur_max_hits\n", - " else chunks_hits[:-1] + [cur_max_hits]),\n", - " **dataset_kwargs)\n", + " cur_fast_data.create_key('rec.hits', cur_data['hits'][pulse_mask],\n", + " maxshape=(None,) + hits.shape[1:],\n", + " chunks=tuple(chunks_hits if chunks_hits[-1] <= cur_max_hits\n", + " else chunks_hits[:-1] + [cur_max_hits]),\n", + " **dataset_kwargs)\n", " \n", " cur_fast_data.create_index(raw=pulse_counts[train_mask], rec=pulse_counts[train_mask])\n", " \n",