Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
%% Cell type:code id: tags:
``` python
import numpy as np
%matplotlib notebook
import matplotlib.pyplot as plt
plt.rcParams['figure.constrained_layout.use'] = True
import dask
print(f'dask: {dask.__version__}')
import dask.array as da
from dask.distributed import Client, progress
from dask_jobqueue import SLURMCluster
import netCDF4
import pickle
```
%% Cell type:code id: tags:
``` python
from psutil import virtual_memory
import gc
# gc.collect()  # run garbage collection to free possible memory
mem = virtual_memory()
print(f'Physical memory: {mem.total/1024/1024/1024:.0f} GB')  # total physical memory available
```
%% Cell type:code id: tags:
``` python
import logging
logging.basicConfig(filename='example.log', level=logging.DEBUG)
```
%% Cell type:code id: tags:
``` python
%load_ext autoreload
%autoreload 2
import toolbox_scs as tb
print(tb.__file__)
import toolbox_scs.routines.boz as boz
```
%% Cell type:code id: tags:
``` python
%load_ext line_profiler
%load_ext memory_profiler
```
%% Cell type:markdown id: tags:
# Create parameters object
%% Cell type:code id: tags:parameters
``` python
proposal = 2937
darkrun = 478
run = 477
module = 15
gain = 0.5
sat_level = 500
rois_th = 1
```
%% Cell type:code id: tags:
``` python
params = boz.parameters(proposal=proposal, darkrun=darkrun, run=run, module=module, gain=gain)
```
%% Cell type:code id: tags:
``` python
from extra_data.read_machinery import find_proposal

root = find_proposal(f'p{proposal:06d}')
path = root + '/usr/processed_runs/' + f'r{params.run}/'
print(path)
```
%% Cell type:code id: tags:
``` python
print(params)
```
%% Cell type:markdown id: tags:
### load data persistently
%% Cell type:code id: tags:
``` python
params.dask_load()
```
%% Cell type:markdown id: tags:
# Dark run inspection
%% Cell type:markdown id: tags:
The aim is to check the dark level and extract a bad pixel map.
%% Cell type:code id: tags:
``` python
dark = boz.average_module(params.arr_dark).compute()
```
%% Cell type:code id: tags:
``` python
pedestal = np.mean(dark)
pedestal
```
%% Cell type:code id: tags:
``` python
mean_th = (pedestal-25, pedestal+30)
f = boz.inspect_dark(params.arr_dark, mean_th=mean_th)
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-dark.png', dpi=300)
```
%% Cell type:code id: tags:
``` python
params.mean_th = mean_th
params.set_mask(boz.bad_pixel_map(params))
```
%% Cell type:code id: tags:
``` python
print(params)
```
%% Cell type:markdown id: tags:
# Histogram
%% Cell type:code id: tags:
``` python
h, f = boz.inspect_histogram(params.arr,
                             params.arr_dark,
                             mask=params.get_mask()  # , extra_lines=True
                             )
f.suptitle(f'p:{params.proposal} r:{params.run} d:{params.darkrun}')
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-histogram.png', dpi=300)
```
%% Cell type:markdown id: tags:
adding a guide to the eye
%% Cell type:code id: tags:
``` python
ax = f.gca()
pf = np.polyfit([60, 220], [7, 4], 1)
ax.plot([40, 400], 2*10**np.polyval(pf, [40, 400]))
ax.plot([40, 400], 0.25*2*10**np.polyval(pf, [40, 400]))
```
%% Cell type:markdown id: tags:
# ROIs extraction
%% Cell type:code id: tags:
``` python
params.rois_th = rois_th
params.rois = boz.find_rois_from_params(params)
```
%% Cell type:code id: tags:
``` python
print(params)
```
%% Cell type:code id: tags:
``` python
data = boz.average_module(params.arr, dark=dark).compute()
dataM = data.mean(axis=0)  # mean over pulseId
```
%% Cell type:code id: tags:
``` python
f = boz.inspect_rois(dataM, params.rois, params.rois_th)
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-rois.png', dpi=300)
```
%% Cell type:markdown id: tags:
# Flat field extraction
%% Cell type:markdown id: tags:
The first step is to compute a good average image, which means removing saturated shots and ignoring bad pixels.
%% Cell type:code id: tags:
``` python
params.sat_level = sat_level
res = boz.average_module(params.arr, dark=dark,
                         ret='mean', mask=params.get_mask(), sat_roi=params.rois['sat'],
                         sat_level=params.sat_level)
avg = res.compute().mean(axis=0)
```
%% Cell type:markdown id: tags:
The second step is to fit, from that good average image, the plane field on the n/0 and p/0 ROIs. We have to make sure that the ROIs have the same width; a quick check follows the next cell.
%% Cell type:code id: tags:
``` python
f = boz.inspect_plane_fitting(avg, params.rois)
```
%% Cell type:code id: tags:
``` python
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-noflatfield.png', dpi=300)
```
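%% Cell type:markdown id: tags:
A quick sanity check (a sketch, not part of the original notebook): the ROI dictionaries store pixel bounds, so the n/0/p widths can be compared directly before fitting.
%% Cell type:code id: tags:
``` python
# hypothetical check: the n, 0 and p ROIs must have the same width
widths = {r: params.rois[r]['xh'] - params.rois[r]['xl'] for r in ['n', '0', 'p']}
print(widths)
```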
%% Cell type:markdown id: tags:
fit the plane field correction
%% Cell type:code id: tags:
``` python
plane = boz.plane_fitting(params)
```
%% Cell type:code id: tags:
``` python
plane
```
%% Cell type:markdown id: tags:
compute the correction and inspect the result of its application
%% Cell type:code id: tags:
``` python
params.set_flat_field(plane.x)
ff = boz.compute_flat_field_correction(params.rois, params.get_flat_field())
```
%% Cell type:code id: tags:
``` python
f = boz.inspect_plane_fitting(avg/ff, params.rois)
```
%% Cell type:code id: tags:
``` python
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-withflatfield.png', dpi=300)
```
%% Cell type:markdown id: tags:
# Non-linearities correction extraction
%% Cell type:markdown id: tags:
To speed up online analysis, we save the corrections with a dummy non-linearity correction. The saved file can be used for online analysis as soon as it appears.
%% Cell type:code id: tags:
``` python
params.set_Fnl(np.arange(2**9))
params.save(path=path)
```
%% Cell type:code id: tags:
``` python
N = 80
domain = boz.nl_domain(N, 40, 511)
params.alpha = 0.5
params.max_iter = 25
```
%% Cell type:markdown id: tags:
## minimizing
%% Cell type:code id: tags:
``` python
res, fit_res = boz.nl_fit(params, domain)
```
%% Cell type:code id: tags:
``` python
params.set_Fnl(boz.nl_lut(domain, res.x))
```
%% Cell type:code id: tags:
``` python
print(params)
```
%% Cell type:code id: tags:
``` python
f = boz.inspect_correction(params, gain=params.gain)
```
%% Cell type:code id: tags:
``` python
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-correction.png', dpi=300)
```
%% Cell type:markdown id: tags:
### plotting the fitted correction
%% Cell type:code id: tags:
``` python
f = boz.inspect_Fnl(params.get_Fnl())
```
%% Cell type:code id: tags:
``` python
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-Fnl.png', dpi=300)
```
%% Cell type:markdown id: tags:
### plotting the fit progression
%% Cell type:code id: tags:
``` python
f = boz.inspect_nl_fit(fit_res)
```
%% Cell type:code id: tags:
``` python
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-nl-fit.png', dpi=300)
```
%% Cell type:markdown id: tags:
# Save the analysis parameters
%% Cell type:code id: tags:
``` python
params.save(path=path)
```
This diff is collapsed.
Getting started
~~~~~~~~~~~~~~~

Toolbox Installation
--------------------
Recommended: Proposal Environment setup
+++++++++++++++++++++++++++++++++++++++

The proposal specific environment is installed by launching in a shell
terminal on Maxwell:

.. code:: bash

    module load exfel exfel_anaconda3
    scs-activate-toolbox-kernel --proposal 2780

where in this example 2780 is the proposal number. After this and refreshing the
browser, a new kernel named ``SCS Toolbox (p002780)`` is available and should
be used to run jupyter notebooks on the Maxwell Jupyter hub.

Alternative: Manual ToolBox Installation
++++++++++++++++++++++++++++++++++++++++

The ToolBox may be installed in any environment. However, it depends on the
extra_data and the euxfel_bunch_pattern packages, which are not official third
party Python modules. Within environments where the latter are not present, they
need to be installed by hand.

Furthermore, as long as the ToolBox is not yet added to one of our custom
environments, it needs to be installed locally. Activate your preferred
environment and check the installation of scs_toolbox by typing:

.. code:: bash

    pip show toolbox_scs

If the toolbox has been installed in your home directory previously, everything
is set up. Otherwise it needs to be installed (only once). In that case enter
the following command from the ToolBox top-level directory:

.. code:: bash

    pip install --user ".[maxwell]"

Alternatively, use the -e flag to install the package in development mode.

.. code:: bash

    pip install --user -e ".[maxwell]"
Transferring Data
-----------------

The DAQ system saves data on the online cluster. To analyze data on the
Maxwell offline cluster, they need to be transferred there. This is achieved by
logging in at:

https://in.xfel.eu/metadata

and then navigating to the proposal and to the ``Runs`` tab, from where runs can
be transferred to the offline cluster by marking them as ``Good`` in the
``Data Assessment``. Depending on the amount of data in the run, this can take a
while.

.. image:: metadata.png

Processing Data
---------------

On the Maxwell Jupyter hub:

https://max-jhub.desy.de

notebooks can be executed using the corresponding Proposal Environment
kernel or the ``xfel`` kernel if the toolbox was manually installed. For quick
startup, example notebooks (.ipynb files) can be directly downloaded from the
:ref:`How to's` section by clicking on ``View page source``.
.. _how to's:

How to's
~~~~~~~~

top
---
@@ -38,6 +40,36 @@ Photo-Electron Spectrometer (PES)

routines
--------

BOZ: Beam-Splitting Off-axis Zone plate analysis
++++++++++++++++++++++++++++++++++++++++++++++++
The BOZ analysis consists of 2 notebooks and a script. The first notebook
:doc:`BOZ analysis part I parameters determination <BOZ analysis part I parameters determination>`
is used to determine all the necessary corrections, that is the flat field
correction from the zone plate optics and the non-linearity correction from the
DSSC gain. The inputs are a dark run and a run with X-rays on three broken or
empty membranes. For the latter, an alternative is to use pre-edge data on an
actual sample. The result is a JSON file that contains the flat field and
non-linearity corrections as well as the parameters used for their
determination, such that the analysis can be reproduced and investigated in
case of issues. The determination of the flat field correction is rather quick,
a few minutes, and it is the most important correction for the change in XAS
computed from the -1st and 1st order. For quick correction of the online
preview one can bypass the non-linearity calculation by taking the JSON file as
soon as it appears.

The determination of the non-linearity correction takes a lot longer, some
2 to 8 hours depending on the number of pulses in the train. For this reason it
is possible to use the script ``scripts/boz_parameters_job.sh`` in the toolbox
to launch the first notebook via slurm:

``sbatch ./boz_parameters_job.sh 615 614 3``

where 615 is the dark run number, 614 is the run on 3 broken membranes and 3 is
the DSSC gain in photons per bin. The proposal number is defined inside the
script file.

The second notebook
:doc:`BOZ analysis part II run processing <BOZ analysis part II run processing>`
then uses the JSON file to load all needed corrections and processes a run with
a corresponding dark run to bin the data and compute a spectrum or a
time-resolved XAS scan.
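For reference, a minimal sketch of how the saved JSON file can be reloaded for
processing (the file name and the ``load`` classmethod name are assumptions
based on the parameters class in the toolbox):

.. code:: python

    import toolbox_scs.routines.boz as boz

    params = boz.parameters.load('parameters.json')  # file name assumed
    ff = boz.compute_flat_field_correction(params.rois, params.get_flat_field())
    Fnl = params.get_Fnl()  # identity if the non-linearity step was bypassed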
doc/metadata.png (new image, 136 KiB)
#!/bin/bash
#SBATCH -N 1
#SBATCH --partition=exfel
#SBATCH --time=12:00:00
#SBATCH --mail-type=END,FAIL
#SBATCH --output=logs/%j-%x.out
PROPOSAL=2619
DARK=$1
RUN=$2
GAIN=$3
ROISTH=${4:-1}
SATLEVEL=${5:-500}
MODULE=15
source /etc/profile.d/modules.sh
module load exfel
module load exfel_anaconda3/1.1
echo processing run $RUN
mkdir ../../processed_runs/r$RUN
papermill 'BOZ analysis part I parameters determination.ipynb' ../../processed_runs/r$RUN/output.ipynb \
-p proposal $PROPOSAL -p darkrun $DARK -p run $RUN -p module $MODULE \
-p gain $GAIN -p rois_th $ROISTH -p sat_level $SATLEVEL -k xfel
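The two optional trailing arguments default to ``ROISTH=1`` and ``SATLEVEL=500`` and are forwarded to papermill as ``rois_th`` and ``sat_level``, so e.g. ``sbatch ./boz_parameters_job.sh 615 614 3 2 450`` would override both (illustrative values).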
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
with open('README.rst') as f:
    readme = f.read()

with open('VERSION') as f:
    _version = f.read()
    _version = _version.strip("\n")
@@ -12,7 +12,7 @@ basic_analysis_reqs = ['numpy', 'scipy',]  # and is readily available in Karabo
advanced_analysis_reqs = [
    'pandas', 'imageio', 'xarray>=0.13.0', 'h5py', 'h5netcdf',]
interactive_reqs = ['ipykernel', 'matplotlib', 'tqdm',]
maxwell_reqs = ['joblib', 'papermill', 'extra_data', 'euxfel_bunch_pattern>=0.6']

setup(name='toolbox_scs',
@@ -132,19 +132,19 @@ mnemonics = {
                'key': 'value.value',
                'dim': None},),
    "PES_RV": ({'source': 'SA3_XTD10_PES/MDL/DAQ_MPOD',
                'key': 'u213.value',
                'dim': None},),
    "PES_N2": ({'source': 'SA3_XTD10_PES/DCTRL/V30300S_NITROGEN',
                'key': 'hardwareState.value',
                'dim': None},),
    "PES_Ne": ({'source': 'SA3_XTD10_PES/DCTRL/V30310S_NEON',
                'key': 'hardwareState.value',
                'dim': None},),
    "PES_Kr": ({'source': 'SA3_XTD10_PES/DCTRL/V30320S_KRYPTON',
                'key': 'hardwareState.value',
                'dim': None},),
    "PES_Xe": ({'source': 'SA3_XTD10_PES/DCTRL/V30330S_XENON',
                'key': 'hardwareState.value',
                'dim': None},),

    # XTD10 MCP (after GATT)
@@ -273,6 +273,14 @@ mnemonics = {
    "VFM_BENDERF": ({'source': 'SCS_KBS_VFM/MOTOR/BENDERF',
                     'key': 'encoderPosition.value',
                     'dim': None},),
    "HFM_BENDING": ({'source': 'SCS_KBS_HFM/MDL/AVERAGER',
                     'key': 'result.value',
                     'dim': None},),
    "VFM_BENDING": ({'source': 'SCS_KBS_VFM/MDL/AVERAGER',
                     'key': 'result.value',
                     'dim': None},),

    # AFS LASER
    "AFS_PhaseShifter": ({'source': 'SCS_ILH_LAS/PHASESHIFTER/DOOCS',
@@ -352,9 +360,12 @@ mnemonics = {
    "scannerY_enc": ({'source': 'SCS_CDIFFT_SAM/ENC/SCANNERY',
                      'key': 'value.value',
                      'dim': None},),
    "SAM-Z-MOT": ({'source': 'SCS_CDIFFT_MOV/MOTOR/SAM_Z',
                   'key': 'actualPosition.value',
                   'dim': None},),
    "SAM-Z": ({'source': 'SCS_CDIFFT_MOV/ENC/SAM_Z',
               'key': 'value.value',
               'dim': None},),
    "magnet": ({'source': 'SCS_CDIFFT_MAG/ASENS/PSU_CMON',
                'key': 'value.value',
                'dim': None},
@@ -371,6 +382,11 @@ mnemonics = {
                  'key': 'data.image.pixels',
                  'dim': ['llc2_y', 'llc2_x']},),

    # PI-MTE3 CCD camera
    "MTE3": ({'source': 'SCS_CDIDET_MTE3/CAM/CAMERA:daqOutput',
              'key': 'data.image.pixels',
              'dim': ['x', 'y']},),

    # FastCCD, if in raw folder, raw images
    # if in proc folder, dark subtracted and relative gain corrected
    "fastccd": ({'source': 'SCS_CDIDET_FCCD2M/DAQ/FCCD:daqOutput',
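A hedged sketch of reading one of the newly added mnemonics, here the PI-MTE3 camera (proposal/run numbers are illustrative; the mnemonics import path is taken from the PES module below):

``` python
from extra_data import open_run
from toolbox_scs.constants import mnemonics

run = open_run(proposal=2937, run=477)  # illustrative run
mnemo = mnemonics['MTE3'][0]
imgs = run.get_array(mnemo['source'], mnemo['key'], extra_dims=mnemo['dim'])
```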
@@ -14,7 +14,7 @@ import extra_data as ed
from ..misc.bunch_pattern_external import is_sase_3
from ..mnemonics_machinery import (mnemonics_to_process,
                                   mnemonics_for_run)
from ..constants import mnemonics as _mnemonics

__all__ = [
    'get_pes_params',
@@ -27,6 +27,7 @@ log = logging.getLogger(__name__)
def get_pes_tof(run, mnemonics=None, merge_with=None,
                start=31390, width=300, origin=None, width_ns=None,
                subtract_baseline=True,
                baseStart=None, baseWidth=80,
                sample_rate=2e9):
    """
@@ -57,6 +58,9 @@ def get_pes_tof(run, mnemonics=None, merge_with=None,
    width_ns: float
        time window for one spectrum. If None, the time window is defined by
        width / sample rate.
    subtract_baseline: bool
        If True, subtract the baseline defined by baseStart and baseWidth from
        each spectrum.
    baseStart: int
        starting sample of the baseline.
    baseWidth: int
@@ -89,7 +93,7 @@ def get_pes_tof(run, mnemonics=None, merge_with=None,
    elif 'bunchPatternTable' in run_mnemonics:
        bpt = run.get_array(*run_mnemonics['bunchPatternTable'].values())
    elif 'bunchPatternTable_SA3' in run_mnemonics:
        bpt = run.get_array(*run_mnemonics['bunchPatternTable_SA3'].values())
    else:
        bpt = None
@@ -114,26 +118,35 @@ def get_pes_tof(run, mnemonics=None, merge_with=None,
            arr = merge_with[m]
        else:
            arr = run.get_array(*run_mnemonics[m].values(), name=m)
        if arr.sizes['PESsampleId'] < npulses*period*440 + start + width:
            log.warning('Not all pulses were recorded. The number of samples on '
                        f'the digitizer {arr.sizes["PESsampleId"]} is not enough '
                        f'to cover the {npulses} spectra. Missing spectra will be '
                        'filled with NaNs.')
        spectra = []
        for p in range(npulses):
            begin = p*period*440 + start
            end = begin + width
            if end > arr.sizes['PESsampleId']:
                break
            pes = arr.isel(PESsampleId=slice(begin, end))
            if subtract_baseline:
                baseBegin = p*period*440 + baseStart
                baseEnd = baseBegin + baseWidth
                bl = arr.isel(
                    PESsampleId=slice(baseBegin, baseEnd)).mean(dim='PESsampleId')
                pes = pes - bl
            spectra.append(pes)
        spectra = xr.concat(spectra,
                            dim='sa3_pId').rename(m.replace('raw', 'tof'))
        ds = ds.merge(spectra)
    if len(ds.variables) > 0:
        ds = ds.assign_coords({'sa3_pId': mask_on['pulse_slot'][:ds.sizes['sa3_pId']].values})
        ds = ds.rename({'PESsampleId': 'time_ns'})
        ds = ds.assign_coords({'time_ns': time_ns})
    if bool(merge_with):
        ds = merge_with.drop(to_process,
                             errors='ignore').merge(ds, join='left')
    return ds
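A hedged usage sketch of the updated function (run numbers and the baseline window are illustrative assumptions, and the import path of ``get_pes_tof`` within the toolbox is assumed):

``` python
from extra_data import open_run
from toolbox_scs.detectors.pes import get_pes_tof  # import path assumed

run = open_run(proposal=2937, run=477)  # illustrative run
ds = get_pes_tof(run, start=31390, width=300,
                 subtract_baseline=True, baseStart=31000, baseWidth=80)
print(ds)
```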
@@ -156,10 +169,13 @@ def get_pes_params(run):
    params = {}
    sel = run.select_trains(ed.by_index[:20])
    mnemonics = mnemonics_for_run(run)
    gas_dict = {'N2': 409.9, 'Ne': 870.2, 'Kr': 1921, 'Xe': 1148.7}
    for gas in gas_dict.keys():
        mnemo = _mnemonics[f'PES_{gas}'][0]
        arr = sel.get_run_value(mnemo['source'], mnemo['key'])
        if arr == 'ON':
            params['gas'] = gas
            params['binding_energy'] = gas_dict[gas]
            break
    if 'gas' not in params:
        params['gas'] = 'unknown'
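With this change the function also reports the binding energy of the detected gas; a sketch of the expected result (the ``run`` object is hypothetical, values follow ``gas_dict`` above):

``` python
params = get_pes_params(run)
# e.g. {'gas': 'Ne', 'binding_energy': 870.2}
```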
@@ -42,13 +42,15 @@ class parameters():
    proposal: int, proposal number
    darkrun: int, run number for the dark run
    run: int, run number for the data run
    module: int, DSSC module number
    gain: float, number of ph per bin
    """

    def __init__(self, proposal, darkrun, run, module, gain):
        self.proposal = proposal
        self.darkrun = darkrun
        self.run = run
        self.module = module
        self.gain = gain
        self.mask_idx = None
        self.mean_th = (None, None)
        self.std_th = (None, None)
@@ -112,7 +114,10 @@ class parameters():
    def get_flat_field(self):
        """Get the flat field plane definition."""
        if self.flat_field is None:
            return None
        else:
            return np.array(self.flat_field)

    def set_Fnl(self, Fnl):
        """Set the non-linear correction function."""
@@ -123,7 +128,10 @@ class parameters():
    def get_Fnl(self):
        """Get the non-linear correction function."""
        if self.Fnl is None:
            return None
        else:
            return np.array(self.Fnl)

    def save(self, path='./'):
        """Save the parameters as a JSON file.
@@ -137,6 +145,7 @@ class parameters():
        v['darkrun'] = self.darkrun
        v['run'] = self.run
        v['module'] = self.module
        v['gain'] = self.gain
        v['mask'] = self.mask_idx
        v['mean_th'] = self.mean_th
@@ -168,7 +177,7 @@ class parameters():
        """
        with open(fname, 'r') as f:
            v = json.load(f)

        c = cls(v['proposal'], v['darkrun'], v['run'], v['module'], v['gain'])
        c.mean_th = v['mean_th']
        c.std_th = v['std_th']
@@ -188,7 +197,7 @@ class parameters():
    def __str__(self):
        f = f'proposal:{self.proposal} darkrun:{self.darkrun} run:{self.run}'
        f += f' module:{self.module} gain:{self.gain} ph/bin\n'
        if self.mask_idx is not None:
            f += f'mean threshold:{self.mean_th} std threshold:{self.std_th}\n'
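A minimal roundtrip sketch for the extended class (the JSON file name and the loader classmethod name are assumptions based on the hunks above):

``` python
params = parameters(proposal=2937, darkrun=478, run=477, module=15, gain=0.5)
print(params)            # now includes 'gain:0.5 ph/bin'
params.save(path='./')   # writes the analysis parameters as JSON
# params = parameters.load('./parameters.json')  # classmethod and file name assumed
```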
@@ -337,16 +346,16 @@ def find_rois(data_mean, threshold):
    rois: dictionary of rois
    """
    # compute vertical and horizontal projection
    pX = data_mean.mean(axis=0)
    pX = pX[:256]  # half the ladder since there is a gap in the middle
    pY = data_mean.mean(axis=1)

    # along X
    lowX = int(np.argmax(pX[:64] > threshold))  # 1st occurrence returned
    highX = int(np.argmax(pX[192:] <= threshold) + 192)  # 1st occ. returned
    leftX = int(np.argmin(pX[64:128]) + 64)
    rightX = int(np.argmin(pX[128:192]) + 128)

    # along Y
    lowY = int(np.argmax(pY[:64] > threshold))  # 1st occurrence returned
@@ -429,9 +438,9 @@ def inspect_rois(data_mean, rois, threshold=None, allrois=False):
    matplotlib figure
    """
    # compute vertical and horizontal projection
    pX = data_mean.mean(axis=0)
    pX = pX[:256]  # half the ladder since there is a gap in the middle
    pY = data_mean.mean(axis=1)

    # Set up the axes with gridspec
    fig = plt.figure(figsize=(5, 3))
@@ -451,6 +460,23 @@ def inspect_rois(data_mean, rois, threshold=None, allrois=False):
                   vmax=np.percentile(data_mean[:, :256], 99))
    main_ax.set_aspect('equal')

    from matplotlib.patches import Rectangle
    roi = rois['n']
    main_ax.add_patch(Rectangle((roi['xl'], 128-roi['yh']),
                                roi['xh'] - roi['xl'],
                                roi['yh'] - roi['yl'],
                                alpha=0.3, color='b'))
    roi = rois['0']
    main_ax.add_patch(Rectangle((roi['xl'], 128-roi['yh']),
                                roi['xh'] - roi['xl'],
                                roi['yh'] - roi['yl'],
                                alpha=0.3, color='g'))
    roi = rois['p']
    main_ax.add_patch(Rectangle((roi['xl'], 128-roi['yh']),
                                roi['xh'] - roi['xl'],
                                roi['yh'] - roi['yl'],
                                alpha=0.3, color='r'))

    x.plot(Xs, pX)
    x.invert_yaxis()
    if threshold is not None:
@@ -471,62 +497,58 @@ def inspect_rois(data_mean, rois, threshold=None, allrois=False):
    return fig
def histogram_module(arr, mask=None):
    """Compute a histogram of the 9 bits raw pixel values over a module.

    Inputs
    ------
    arr: dask array of reshaped dssc data (trainId, pulseId, x, y)
    mask: optional bad pixel mask

    Returns
    -------
    histogram
    """
    if mask is not None:
        w = da.repeat(da.repeat(da.array(mask[None, None, :, :]),
                      arr.shape[1], axis=1), arr.shape[0], axis=0)
        w = w.rechunk((100, -1, -1, -1))
        return da.bincount(arr.ravel(), w.ravel(), minlength=512).compute()
    else:
        return da.bincount(arr.ravel(), minlength=512).compute()


def inspect_histogram(arr, arr_dark=None, mask=None, extra_lines=False):
    """Compute and plot a histogram of the 9 bits raw pixel values.

    Inputs
    ------
    arr: dask array of reshaped dssc data (trainId, pulseId, x, y)
    arr_dark: dask array of reshaped dssc dark data (trainId, pulseId, x, y)
    mask: optional bad pixel mask
    extra_lines: boolean, default False, plot extra lines at period values

    Returns
    -------
    (h, hd): histograms of arr, arr_dark
    figure
    """
    from matplotlib.ticker import MultipleLocator

    f = plt.figure(figsize=(6, 3))

    h = histogram_module(arr, mask=mask)
    Sum_h = np.sum(h)
    plt.plot(np.arange(2**9), h/Sum_h, marker='o',
             ms=3, markerfacecolor='none', lw=1)

    if arr_dark is not None:
        hd = histogram_module(arr_dark, mask=mask)
        Sum_hd = np.sum(hd)
        plt.plot(np.arange(2**9), hd/Sum_hd, marker='o',
                 ms=3, markerfacecolor='none', lw=1, c='k', alpha=.5)
    else:
        hd = None

    if extra_lines:
        for k in range(50, 271):
            if not (k - 2) % 8:
@@ -537,21 +559,18 @@ def inspect_histogram(proposalNB, runNB, moduleNB,
                plt.axvline(k, c='r', alpha=0.3, ls='--')

        plt.axvline(271, c='C1', alpha=0.5, ls='--')

    plt.xlim([0, 2**9-1])
    plt.yscale('log')
    plt.axes().xaxis.set_minor_locator(MultipleLocator(10))
    plt.xlabel('DSSC pixel value')
    plt.ylabel('count frequency')

    return (h, hd), f
def load_dssc_module(proposalNB, runNB, moduleNB=15,
                     subset=slice(None), drop_intra_darks=True, persist=False):
    """Load single module dssc data as dask array.

    Inputs
@@ -561,7 +580,7 @@ def load_dssc_module(proposalNB, runNB, moduleNB=15,
    moduleNB: default 15, module number
    subset: default slice(None), subset of trains to load
    drop_intra_darks: boolean, default True, remove intra darks from the data
    persist: default False, load all data persistently in memory

    Returns
    -------
@@ -579,6 +598,8 @@ def load_dssc_module(proposalNB, runNB, moduleNB=15,
    arr[arr == 0] = 256

    ppt = run[source, key][subset].data_counts()
    # ignore trains with no pulses, which can happen in burst mode acquisition
    ppt = ppt[ppt > 0]

    tid = ppt.index.to_numpy()
    ppt = np.unique(ppt)
@@ -795,26 +816,24 @@ def inspect_plane_fitting(avg, rois, vmin=None, vmax=None):
    fig, axs = plt.subplots(2, 3, sharex=True, figsize=(6, 6))

    img_rois = {}
    centers = {}
    for k, r in enumerate(['n', '0', 'p']):
        roi = rois[r]
        centers[r] = np.array([(roi['yl'] + roi['yh'])//2,
                               (roi['xl'] + roi['xh'])//2])
    d = '0'
    roi = rois[d]
    for k, r in enumerate(['n', '0', 'p']):
        img_rois[r] = np.roll(avg, tuple(centers[d] - centers[r]))[
            roi['yl']:roi['yh'], roi['xl']:roi['xh']]
        im = axs[0, k].imshow(img_rois[r],
                              vmin=vmin,
                              vmax=vmax)

    for k, r in enumerate(['n', '0', 'p']):
        v = img_rois[r]/img_rois['0']
        im2 = axs[1, k].imshow(v, vmin=0.2, vmax=1.1, cmap='RdBu_r')

    cbar = fig.colorbar(im, ax=axs[0, :], orientation="horizontal")
@@ -1073,16 +1092,17 @@ def nl_crit(p, domain, alpha, arr_dark, arr, tid, rois, mask, flat_field,
    # drop saturated shots
    d = data.where(data['sat_sat'] == False, drop=True)

    # error terms: weighted spread of the 'n'/'0' and 'p'/'0' beamlet ratios
    v_1 = snr(d['n'].values.flatten(), d['0'].values.flatten(),
              methods=['weighted'])
    err_1 = 1e8*v_1['weighted']['s']**2

    v_2 = snr(d['p'].values.flatten(), d['0'].values.flatten(),
              methods=['weighted'])
    err_2 = 1e8*v_2['weighted']['s']**2

    # regularization: keep the non-linearity model close to the identity
    err_a = np.sum((Fmodel-np.arange(2**9))**2)

    return (1.0 - alpha)*0.5*(err_1 + err_2) + alpha*err_a


def nl_fit(params, domain):
@@ -1175,44 +1195,68 @@ def inspect_nl_fit(res_fit):
    return f
def snr(sig, ref, methods=None, verbose=False):
    """Compute mean, std and SNR from transmitted signal sig and I0 signal ref.

    Inputs
    ------
    sig: 1D signal samples
    ref: 1D reference samples
    methods: None by default or list of strings to select which methods to use.
        Possible values are 'direct', 'weighted', 'diff'. In case of None, all
        methods will be calculated.
    verbose: boolean, if True prints the calculated values

    Returns
    -------
    dictionary of [method][value] where value is 'mu' for mean and 's' for
    standard deviation.
    """
    if methods is None:
        methods = ['direct', 'weighted', 'diff']

    w = ref
    x = sig/ref

    mask = np.isfinite(x) & np.isfinite(sig) & np.isfinite(ref)
    w = w[mask]
    sig = sig[mask]
    ref = ref[mask]
    x = x[mask]

    res = {}

    # direct mean and std
    if 'direct' in methods:
        mu = np.mean(x)
        s = np.std(x)
        if verbose:
            print(f'mu: {mu}, s: {s}, snr: {mu/s}')
        res['direct'] = {'mu': mu, 's': s}

    # weighted mean and std
    if 'weighted' in methods:
        wmu = np.sum(sig)/np.sum(ref)
        v1 = np.sum(w)
        v2 = np.sum(w**2)
        ws = np.sqrt(np.sum(w*(x - wmu)**2)/(v1 - v2/v1))
        if verbose:
            print(f'weighted mu: {wmu}, s: {ws}, snr: {wmu/ws}')
        res['weighted'] = {'mu': wmu, 's': ws}

    # noise from diff
    if 'diff' in methods:
        dmu = np.mean(x)
        ds = np.std(np.diff(x))/np.sqrt(2)
        if verbose:
            print(f'diff mu: {dmu}, s: {ds}, snr: {dmu/ds}')
        res['diff'] = {'mu': dmu, 's': ds}

    return res
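A quick, self-contained usage sketch of ``snr`` on synthetic data (all values illustrative):

``` python
import numpy as np

rng = np.random.default_rng(0)
ref = rng.normal(1000.0, 30.0, size=10_000)          # synthetic I0 reference
sig = 0.98*ref + rng.normal(0.0, 5.0, size=10_000)   # synthetic transmitted signal

res = snr(sig, ref, methods=['direct', 'weighted'], verbose=True)
print(res['weighted']['mu'] / res['weighted']['s'])  # weighted SNR
```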
@@ -1239,13 +1283,22 @@ def inspect_correction(params, gain=None):
            fitrois[k] = params.rois[k]

    # flat field
    plane_ff = params.get_flat_field()
    if plane_ff is None:
        plane_ff = [0.0, 0.0, 1.0, -1.0]
    ff = compute_flat_field_correction(params.rois, plane_ff)

    # non linearities
    Fnl = params.get_Fnl()
    if Fnl is None:
        Fnl = np.arange(2**9)

    # compute all levels of correction
    data = process(np.arange(2**9), params.arr_dark, params.arr, params.tid,
                   fitrois, params.get_mask(), np.ones_like(ff), params.sat_level)
    data_ff = process(np.arange(2**9), params.arr_dark, params.arr, params.tid,
                      fitrois, params.get_mask(), ff, params.sat_level)
    data_ff_nl = process(Fnl, params.arr_dark, params.arr,
                         params.tid, fitrois, params.get_mask(), ff, params.sat_level)

    # for conversion to nb of photons
@@ -1256,7 +1309,7 @@ def inspect_correction(params, gain=None):
    scale = 1e-6

    f, axs = plt.subplots(3, 3, figsize=(8, 6), sharex=True)

    # nbins = np.linspace(0.01, 1.0, 100)
@@ -1278,16 +1331,11 @@ def inspect_correction(params, gain=None):
            snr_v = snr(good_d[n].values.flatten(),
                        good_d[r].values.flatten(), verbose=True)

            m = snr_v['direct']['mu']
            h, xedges, yedges, img = axs[l, k].hist2d(
                g*scale*good_d[r].values.flatten(),
                good_d[n].values.flatten()/good_d[r].values.flatten(),
                [photon_scale, np.linspace(0.95, 1.05, 150)*m],
                cmap='Blues',
                vmax=200,
                norm=LogNorm(),
@@ -1295,13 +1343,14 @@ def inspect_correction(params, gain=None):
            )
            h, xedges, yedges, img2 = axs[l, k].hist2d(
                g*scale*sat_d[r].values.flatten(),
                sat_d[n].values.flatten()/sat_d[r].values.flatten(),
                [photon_scale, np.linspace(0.95, 1.05, 150)*m],
                cmap='Reds',
                vmax=200,
                norm=LogNorm(),
                # alpha=0.5  # makes the plot look ugly with lots of white lines
            )

            v = snr_v['direct']['mu']/snr_v['direct']['s']
            axs[l, k].text(0.4, 0.15, f'SNR: {v:.0f}',
                           transform=axs[l, k].transAxes)
@@ -1312,9 +1361,11 @@ def inspect_correction(params, gain=None):
            # axs[l, k].plot(3*nbins, 1+np.sqrt(2/(1e6*nbins)), c='C1', ls='--')
            # axs[l, k].plot(3*nbins, 1-np.sqrt(2/(1e6*nbins)), c='C1', ls='--')

            axs[l, k].set_ylim([0.95*m, 1.05*m])

    for k in range(3):
        # for l in range(3):
        #     axs[l, k].set_ylim([0.95, 1.05])
        if gain:
            axs[2, k].set_xlabel('#ph (10$^6$)')
        else: