%% Cell type:code id: tags:
``` python
import numpy as np
%matplotlib notebook
import matplotlib.pyplot as plt
plt.rcParams['figure.constrained_layout.use'] = True
import dask
print(f'dask: {dask.__version__}')
import dask.array as da
from dask.distributed import Client, progress
from dask_jobqueue import SLURMCluster
import netCDF4
import pickle
```
%% Cell type:code id: tags:
``` python
from psutil import virtual_memory
import gc
# gc.collect() # run garbage collection to free possible memory
mem = virtual_memory()
print(f'Physical memory: {mem.total/1024/1024/1024:.0f} GiB') # total physical memory available
```
%% Cell type:code id: tags:
``` python
import logging
logging.basicConfig(filename='example.log', level=logging.DEBUG)
```
%% Cell type:code id: tags:
``` python
%load_ext autoreload
%autoreload 2
import toolbox_scs as tb
print(tb.__file__)
import toolbox_scs.routines.boz as boz
```
%% Cell type:code id: tags:
``` python
%load_ext line_profiler
%load_ext memory_profiler
```
%% Cell type:markdown id: tags:
# Slurm cluster setup
%% Cell type:markdown id: tags:
Change to *True* to allocate a Slurm cluster and run the calculation on it instead of on the current machine.
# Create parameters object
%% Cell type:code id: tags:
%% Cell type:code id: tags:parameters
``` python
if False:
partition = 'exfel' # For EuXFEL staff
# partition = 'upex' # For users during beamtime
cluster = SLURMCluster(
queue=partition,
local_directory='/scratch', # Local disk space for workers to use
# Resources per SLURM job (per node, the way SLURM is configured on Maxwell)
# processes=8 runs 8 Dask workers in a job, so each worker has 2 cores & 64 GB RAM.
processes=8, cores=16, memory='512GB',
walltime='9:00:00',
#interface='ib0',
#job_extra=["--reservation=upex_002619"] # reserved partition
)
# dashboard link
# print('https://max-jhub.desy.de/user/lleguy/proxy/8787/graph')
# Submit 4 SLURM jobs (8 workers each), for 32 Dask workers
cluster.scale(32)
#cluster.adapt(minimum=0, maximum=32)
client = Client(cluster)
print("Created dask client:", client)
# Get a notebook widget showing the cluster state
cluster
proposal = 2937
darkrun = 478
run = 477
module = 15
gain = 0.5
sat_level = 500
rois_th = 1
```
%% Cell type:code id: tags:
``` python
try:
client.close()
cluster.close()
except NameError:
print('No client defined')
params = boz.parameters(proposal=proposal, darkrun=darkrun, run=run, module=module, gain=gain)
```
%% Cell type:markdown id: tags:
# Create parameters object
%% Cell type:code id: tags:
``` python
params = boz.parameters(proposal=2712, darkrun=5, run=6, module=15)
#params = boz.parameters(proposal=2619, darkrun=33, run=38, module=15)
from extra_data.read_machinery import find_proposal
root = find_proposal(f'p{proposal:06d}')
path = root + '/usr/processed_runs/' + f'r{params.run}/'
print(path)
```
%% Cell type:code id: tags:
``` python
print(params)
```
%% Cell type:markdown id: tags:
### Load data persistently
%% Cell type:code id: tags:
``` python
params.dask_load()
```
%% Cell type:markdown id: tags:
# Dark run inspection
%% Cell type:markdown id: tags:
The aim is to check the dark level and extract a bad-pixel map.
%% Cell type:code id: tags:
``` python
mean_th = (30, 56)
dark = boz.average_module(params.arr_dark).compute()
```
%% Cell type:code id: tags:
``` python
pedestal = np.mean(dark)
pedestal
```
%% Cell type:code id: tags:
``` python
mean_th = (pedestal-25, pedestal+30)
f = boz.inspect_dark(params.arr_dark, mean_th=mean_th)
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-dark.png', dpi=300)
```
%% Cell type:code id: tags:
``` python
params.mean_th = mean_th
params.set_mask(boz.bad_pixel_map(params))
```
%% Cell type:code id: tags:
``` python
print(params)
```
%% Cell type:markdown id: tags:
# Histogram
%% Cell type:code id: tags:
``` python
h, f = boz.inspect_histogram(params.proposal, params.run, params.module,
h, f = boz.inspect_histogram(params.arr,
params.arr_dark,
mask=params.get_mask() #, extra_lines=True
)
f.suptitle(f'p:{params.proposal} r:{params.run} d:{params.darkrun}')
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-histogram.png', dpi=300)
```
%% Cell type:markdown id: tags:
Adding a guide to the eye.
%% Cell type:code id: tags:
``` python
ax = f.gca()
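# guide lines: linear fit of log10(counts) through (60, 1e7) and (220, 1e4), drawn at x2 and x0.5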
pf = np.polyfit([60, 220], [7, 4], 1)
ax.plot([40, 400], 2*10**np.polyval(pf, [40, 400]))
ax.plot([40, 400], 0.25*2*10**np.polyval(pf, [40, 400]))
```
%% Cell type:markdown id: tags:
# ROIs extraction
%% Cell type:code id: tags:
``` python
params.rois_th = 50
params.rois_th = rois_th
params.rois = boz.find_rois_from_params(params)
```
%% Cell type:code id: tags:
``` python
print(params)
```
%% Cell type:code id: tags:
``` python
dark = boz.average_module(params.arr_dark).compute()
data = boz.average_module(params.arr, dark=dark).compute()
dataM = data.mean(axis=0) # mean over pulseId
```
%% Cell type:code id: tags:
``` python
f = boz.inspect_rois(dataM, params.rois, params.rois_th)
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-rois.png', dpi=300)
```
%% Cell type:markdown id: tags:
# Flat field extraction
%% Cell type:markdown id: tags:
The first step is to compute a good average image, which means removing saturated shots and ignoring bad pixels.
%% Cell type:code id: tags:
``` python
params.sat_level = 511
params.sat_level = sat_level
res = boz.average_module(params.arr, dark=dark,
ret='mean', mask=params.get_mask(), sat_roi=params.rois['sat'],
sat_level=params.sat_level)
avg = res.compute().mean(axis=0)
```
%% Cell type:markdown id: tags:
The second step is to fit, from that good average image, the flat-field plane on the n/0 and p/0 ratios. We have to make sure that the ROIs have the same width.
%% Cell type:code id: tags:
``` python
f = boz.inspect_plane_fitting(avg, params.rois)
```
%% Cell type:code id: tags:
``` python
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-noflatfield.png', dpi=300)
```
%% Cell type:markdown id: tags:
Fit the flat-field plane correction.
%% Cell type:code id: tags:
``` python
plane = boz.plane_fitting(params)
```
%% Cell type:code id: tags:
``` python
plane
```
%% Cell type:markdown id: tags:
Compute the correction and inspect the result of its application.
%% Cell type:code id: tags:
``` python
params.set_flat_field(plane.x)
ff = boz.compute_flat_field_correction(params.rois, params.get_flat_field())
```
%% Cell type:code id: tags:
``` python
f = boz.inspect_plane_fitting(avg/ff, params.rois)
```
%% Cell type:code id: tags:
``` python
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-withflatfield.png', dpi=300)
```
%% Cell type:markdown id: tags:
# Non-linearities correction extraction
%% Cell type:markdown id: tags:
To speed up online analysis, we save the corrections with a dummy non-linearity correction. The saved file can be used for online analysis as soon as it appears.
%% Cell type:code id: tags:
``` python
N = 40
domain = boz.nl_domain(N, 40, 280)
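# identity lookup table over the 9-bit range, i.e. a dummy non-linearity correction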
params.set_Fnl(np.arange(2**9))
params.save(path=path)
```
%% Cell type:code id: tags:
``` python
N = 80
domain = boz.nl_domain(N, 40, 511)
params.alpha = 0.5
params.max_iter = 25
```
%% Cell type:markdown id: tags:
## Minimizing
%% Cell type:code id: tags:
``` python
res, fit_res = boz.nl_fit(params, domain)
```
%% Cell type:code id: tags:
``` python
params.set_Fnl(boz.nl_lut(domain, res.x))
```
%% Cell type:code id: tags:
``` python
f = boz.inspect_correction(params, gain=3)
print(params)
```
%% Cell type:code id: tags:
``` python
f = boz.inspect_correction(params, gain=params.gain)
```
%% Cell type:code id: tags:
``` python
f.savefig(f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-correction.png', dpi=300)
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-correction.png', dpi=300)
```
%% Cell type:markdown id: tags:
### Plotting the fitted correction
%% Cell type:code id: tags:
``` python
f = boz.inspect_Fnl(params.get_Fnl())
```
%% Cell type:code id: tags:
``` python
f.savefig(f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-Fnl.png', dpi=300)
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-Fnl.png', dpi=300)
```
%% Cell type:markdown id: tags:
### Plotting the fit progression
%% Cell type:code id: tags:
``` python
f = boz.inspect_nl_fit(fit_res)
```
%% Cell type:code id: tags:
``` python
f.savefig(f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-nl-fit.png', dpi=300)
f.savefig(path+f'p{params.proposal}-r{params.run}-d{params.darkrun}-inspect-nl-fit.png', dpi=300)
```
%% Cell type:markdown id: tags:
# Save the analysis parameters
%% Cell type:code id: tags:
``` python
params.save()
params.save(path=path)
```
``Getting started``
~~~~~~~~~~~~~~~~~~~
Getting started
~~~~~~~~~~~~~~~
Installation
------------
The ToolBox may be installed in any environment. However, it depends on the extra_data and the euxfel_bunch_pattern packages, which are not official third-party Python modules. In environments where they are not present, they need to be installed by hand.
Toolbox Installation
--------------------
Furthermore, as long as the ToolBox is not yet added to one of our custom environments, it needs to be installed locally. Activate your preferred environment and check the installation of scs_toolbox by typing:
Recommended: Proposal Environment setup
+++++++++++++++++++++++++++++++++++++++
The proposal specific environment is installed by launching in a shell
terminal on Maxwell:
.. code:: bash
module load exfel exfel_anaconda3
scs-activate-toolbox-kernel --proposal 2780
where in this example 2780 is the proposal number. After this and refreshing the
browser, a new kernel named ``SCS Toolbox (p002780)`` is available and should
be used to run jupyter notebooks on the Maxwell Jupyter hub.
Alternative: Manual ToolBox Installation
++++++++++++++++++++++++++++++++++++++++
The ToolBox may be installed in any environment. However, it depends on the
extra_data and the euxfel_bunch_pattern packages, which are not official
third-party Python modules. In environments where they are not present, they
need to be installed by hand.
Furthermore, as long as the ToolBox is not yet added to one of our custom
environments, it needs to be installed locally. Activate your preferred
environment and check the installation of scs_toolbox by typing:
.. code:: bash
pip show toolbox_scs
If the toolbox has been installed in your home directory previously, everything is set up. Otherwise it needs to be installed (only once). In that case enter the following command from the ToolBox top-level directory:
If the toolbox has been installed in your home directory previously, everything
is set up. Otherwise it needs to be installed (only once). In that case enter
the following command from the ToolBox top-level directory:
.. code:: bash
pip install --user ".[maxwell]"
Alternatively, use the ``-e`` flag to install the package in development mode.
Alternatively, use the ``-e`` flag to install the package in development
mode.
.. code:: bash
pip install --user -e ".[maxwell]"
Transferring Data
-----------------
The DAQ system saves data on the online cluster. To analyze data on the
Maxwell offline cluster, it needs to be transferred there. This is achieved by
logging in at:
https://in.xfel.eu/metadata
and then navigating to the proposal, then to the ``Runs`` tab from where runs can
be transferred to the offline cluster by marking them as ``Good`` in the
``Data Assessment``. Depending on the amount of data in the run, this can take a
while.
.. image:: metadata.png
Processing Data
---------------
On the Maxwell Jupyter hub:
https://max-jhub.desy.de
notebooks can be executed using the corresponding Proposal Environment
kernel or the ``xfel`` kernel if the toolbox was manually installed. For quick
startup, example notebooks (.ipynb files) can be directly downloaded from the
:ref:`How to's` section by clicking on ``View page source``.
``How to's``
~~~~~~~~~~~~
.. _how to's:
How to's
~~~~~~~~
top
---
@@ -38,6 +40,36 @@ Photo-Electron Spectrometer (PES)
routines
--------
* :doc:`BOZ analysis part I parameters determination <BOZ analysis part I parameters determination>`.
* :doc:`BOZ analysis part II run processing <BOZ analysis part II run processing>`.
* *to do*
BOZ: Beam-Splitting Off-axis Zone plate analysis
++++++++++++++++++++++++++++++++++++++++++++++++
The BOZ analysis consists of two notebooks and a script. The first notebook
:doc:`BOZ analysis part I parameters determination <BOZ analysis part I parameters determination>`
is used to determine all the necessary corrections, that is the flat field
correction from the zone plate optics and the non-linearity correction from the
DSSC gain. The inputs are a dark run and a run with X-rays on three broken or
empty membranes. For the latter, an alternative is to use pre-edge data on an
actual sample. The result is a JSON file that contains the flat field and
non-linearity corrections as well as the parameters used for their
determination, such that the analysis can be reproduced and investigated in
case of issues. The determination of the flat field correction is rather
quick, a few minutes, and is the most important correction for the change in
XAS computed from the -1st and 1st order. For a quick correction of the online
preview one can bypass the non-linearity calculation by using the JSON file as
soon as it appears.
The determination of the non-linearity correction takes a lot longer, some 2
to 8 hours depending on the number of pulses in the train. For this reason it
is possible to use the script ``scripts/boz_parameters_job.sh`` in the toolbox
to launch the first notebook via slurm:
``sbatch ./boz_parameters_job.sh 615 614 3``
where 615 is the dark run number, 614 is the run on 3 broken membranes and 3
is the DSSC gain in photons per bin. The proposal number is defined inside the
script file.
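The script (shown further below) also accepts two optional arguments, the ROI
threshold (default 1) and the saturation level (default 500), so a fully
explicit call would read ``sbatch ./boz_parameters_job.sh 615 614 3 1 500``.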
The second notebook
:doc:`BOZ analysis part II run processing <BOZ analysis part II run processing>`
then uses the JSON file to load all needed corrections and
processes a run with a corresponding dark run to bin data and compute a
spectrum or a time-resolved XAS scan.
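As a minimal sketch of that loading step, assuming the JSON file written by
part I and that the loader classmethod whose body appears later in this diff
is named ``parameters.load`` (the file name here is a hypothetical example):

.. code:: python

   import toolbox_scs.routines.boz as boz

   # reload the analysis parameters saved by part I (loader name assumed)
   params = boz.parameters.load('parameters.json')

   # rebuild the flat field correction from the stored plane definition
   ff = boz.compute_flat_field_correction(params.rois, params.get_flat_field())

   # non-linearity lookup table; an identity LUT if only the dummy was saved
   Fnl = params.get_Fnl()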
doc/metadata.png (new image, 136 KiB)
#!/bin/bash
#SBATCH -N 1
#SBATCH --partition=exfel
#SBATCH --time=12:00:00
#SBATCH --mail-type=END,FAIL
#SBATCH --output=logs/%j-%x.out
PROPOSAL=2619
DARK=$1
RUN=$2
GAIN=$3
ROISTH=${4:-1}
SATLEVEL=${5:-500}
MODULE=15
source /etc/profile.d/modules.sh
module load exfel
module load exfel_anaconda3/1.1
echo processing run $RUN
mkdir -p ../../processed_runs/r$RUN
papermill 'BOZ analysis part I parameters determination.ipynb' ../../processed_runs/r$RUN/output.ipynb \
-p proposal $PROPOSAL -p darkrun $DARK -p run $RUN -p module $MODULE \
-p gain $GAIN -p rois_th $ROISTH -p sat_level $SATLEVEL -k xfel
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
with open('VERSION') as f:
_version = f.read()
_version = _version.strip("\n")
@@ -12,7 +12,7 @@ basic_analysis_reqs = ['numpy', 'scipy',] # and is readily available in Karabo
advanced_analysis_reqs = [
'pandas', 'imageio', 'xarray>=0.13.0', 'h5py', 'h5netcdf',]
interactive_reqs = ['ipykernel', 'matplotlib', 'tqdm',]
maxwell_reqs = ['joblib', 'extra_data', 'euxfel_bunch_pattern>=0.6']
maxwell_reqs = ['joblib', 'papermill', 'extra_data', 'euxfel_bunch_pattern>=0.6']
setup(name='toolbox_scs',
@@ -132,19 +132,19 @@ mnemonics = {
'key': 'value.value',
'dim': None},),
"PES_RV": ({'source': 'SA3_XTD10_PES/MDL/DAQ_MPOD',
'key': 'u215.value',
'key': 'u213.value',
'dim': None},),
"PES_N2": ({'source': 'SA3_XTD10_PES/DCTRL/V30300S_NITROGEN',
'key': 'interlock.AActionState.value',
'key': 'hardwareState.value',
'dim': None},),
"PES_Ne": ({'source': 'SA3_XTD10_PES/DCTRL/V30310S_NEON',
'key': 'interlock.AActionState.value',
'key': 'hardwareState.value',
'dim': None},),
"PES_Kr": ({'source': 'SA3_XTD10_PES/DCTRL/V30320S_KRYPTON',
'key': 'interlock.AActionState.value',
'key': 'hardwareState.value',
'dim': None},),
"PES_Xe": ({'source': 'SA3_XTD10_PES/DCTRL/V30330S_XENON',
'key': 'interlock.AActionState.value',
'key': 'hardwareState.value',
'dim': None},),
# XTD10 MCP (after GATT)
@@ -273,6 +273,14 @@ mnemonics = {
"VFM_BENDERF": ({'source': 'SCS_KBS_VFM/MOTOR/BENDERF',
'key': 'encoderPosition.value',
'dim': None},),
"HFM_BENDING": ({'source': 'SCS_KBS_HFM/MDL/AVERAGER',
'key': 'result.value',
'dim': None},),
"VFM_BENDING": ({'source': 'SCS_KBS_VFM/MDL/AVERAGER',
'key': 'result.value',
'dim': None},),
# AFS LASER
"AFS_PhaseShifter": ({'source': 'SCS_ILH_LAS/PHASESHIFTER/DOOCS',
@@ -352,9 +360,12 @@ mnemonics = {
"scannerY_enc": ({'source': 'SCS_CDIFFT_SAM/ENC/SCANNERY',
'key': 'value.value',
'dim': None},),
"SAM-Z-MOT": ({'source': 'SCS_CDIFFT_MOV/MOTOR/SAM_Z',
'key': 'actualPosition.value',
'dim': None},),
"SAM-Z": ({'source': 'SCS_CDIFFT_MOV/ENC/SAM_Z',
'key': 'value.value',
'dim': None},),
'dim': None},),
"magnet": ({'source': 'SCS_CDIFFT_MAG/ASENS/PSU_CMON',
'key': 'value.value',
'dim': None},
@@ -371,6 +382,11 @@ mnemonics = {
'key': 'data.image.pixels',
'dim': ['llc2_y', 'llc2_x']},),
# PI-MTE3 CCD camera
"MTE3": ({'source': 'SCS_CDIDET_MTE3/CAM/CAMERA:daqOutput',
'key': 'data.image.pixels',
'dim': ['x', 'y']},),
# FastCCD, if in raw folder, raw images
# if in proc folder, dark-subtracted and relative gain corrected
"fastccd": ({'source': 'SCS_CDIDET_FCCD2M/DAQ/FCCD:daqOutput',
@@ -14,7 +14,7 @@ import extra_data as ed
from ..misc.bunch_pattern_external import is_sase_3
from ..mnemonics_machinery import (mnemonics_to_process,
mnemonics_for_run)
from ..constants import mnemonics as _mnemonics
__all__ = [
'get_pes_params',
@@ -27,6 +27,7 @@ log = logging.getLogger(__name__)
def get_pes_tof(run, mnemonics=None, merge_with=None,
start=31390, width=300, origin=None, width_ns=None,
subtract_baseline=True,
baseStart=None, baseWidth=80,
sample_rate=2e9):
"""
@@ -57,6 +58,9 @@ def get_pes_tof(run, mnemonics=None, merge_with=None,
width_ns: float
time window for one spectrum. If None, the time window is defined by
width / sample rate.
subtract_baseline: bool
If True, subtract baseline defined by baseStart and baseWidth to each
spectrum.
baseStart: int
starting sample of the baseline.
baseWidth: int
@@ -89,7 +93,7 @@ def get_pes_tof(run, mnemonics=None, merge_with=None,
elif 'bunchPatternTable' in run_mnemonics:
bpt = run.get_array(*run_mnemonics['bunchPatternTable'].values())
elif 'bunchPatternTable_SA3' in run_mnemonics:
bpt = run.get_array(*mnemonics['bunchPatternTable_SA3'].values())
bpt = run.get_array(*run_mnemonics['bunchPatternTable_SA3'].values())
else:
bpt = None
@@ -114,26 +118,35 @@ def get_pes_tof(run, mnemonics=None, merge_with=None,
arr = merge_with[m]
else:
arr = run.get_array(*run_mnemonics[m].values(), name=m)
if arr.sizes['PESsampleId'] < npulses*period*440 + start + width:
log.warning('Not all pulses were recorded. The number of samples on '
f'the digitizer {arr.sizes["PESsampleId"]} is not enough '
f'to cover the {npulses} spectra. Missing spectra will be '
'filled with NaNs.')
spectra = []
for p in range(npulses):
begin = p*period*440 + start
end = begin + width
baseBegin = p*period*440 + baseStart
baseEnd = baseBegin + baseWidth
if end > arr.sizes['PESsampleId']:
break
pes = arr.isel(PESsampleId=slice(begin, end))
bl = arr.isel(
PESsampleId=slice(baseBegin, baseEnd)).mean(dim='PESsampleId')
spectra.append(pes - bl)
if subtract_baseline:
baseBegin = p*period*440 + baseStart
baseEnd = baseBegin + baseWidth
bl = arr.isel(
PESsampleId=slice(baseBegin, baseEnd)).mean(dim='PESsampleId')
pes = pes - bl
spectra.append(pes)
spectra = xr.concat(spectra,
dim='sa3_pId').rename(m.replace('raw', 'tof'))
ds = ds.merge(spectra)
if len(ds.variables) > 0:
ds = ds.assign_coords({'sa3_pId': mask_on['pulse_slot'].values})
ds = ds.assign_coords({'sa3_pId': mask_on['pulse_slot'][:ds.sizes['sa3_pId']].values})
ds = ds.rename({'PESsampleId': 'time_ns'})
ds = ds.assign_coords({'time_ns': time_ns})
if bool(merge_with):
ds = merge_with.drop(to_process,
errors='ignore').merge(ds, join='inner')
errors='ignore').merge(ds, join='left')
return ds
@@ -156,10 +169,13 @@ def get_pes_params(run):
params = {}
sel = run.select_trains(ed.by_index[:20])
mnemonics = mnemonics_for_run(run)
for gas in ['N2', 'Ne', 'Kr', 'Xe']:
arr = sel.get_array(*mnemonics[f'PES_{gas}'].values())
if arr[0] == 0:
gas_dict = {'N2': 409.9, 'Ne': 870.2, 'Kr': 1921, 'Xe': 1148.7}
for gas in gas_dict.keys():
mnemo = _mnemonics[f'PES_{gas}'][0]
arr = sel.get_run_value(mnemo['source'], mnemo['key'])
if arr == 'ON':
params['gas'] = gas
params['binding_energy'] = gas_dict[gas]
break
if 'gas' not in params:
params['gas'] = 'unknown'
@@ -42,13 +42,15 @@ class parameters():
proposal: int, proposal number
darkrun: int, run number for the dark run
run: int, run number for the data run
module: int: DSSC module number
module: int, DSSC module number
gain: float, number of ph per bin
"""
def __init__(self, proposal, darkrun, run, module):
def __init__(self, proposal, darkrun, run, module, gain):
self.proposal = proposal
self.darkrun = darkrun
self.run = run
self.module = module
self.gain = gain
self.mask_idx = None
self.mean_th = (None, None)
self.std_th = (None, None)
@@ -112,7 +114,10 @@ class parameters():
def get_flat_field(self):
"""Get the flat field plane definition."""
return np.array(self.flat_field)
if self.flat_field is None:
return None
else:
return np.array(self.flat_field)
def set_Fnl(self, Fnl):
"""Set the non-linear correction function."""
@@ -123,7 +128,10 @@ class parameters():
def get_Fnl(self):
"""Get the non-linear correction function."""
return np.array(self.Fnl)
if self.Fnl is None:
return None
else:
return np.array(self.Fnl)
def save(self, path='./'):
"""Save the parameters as a JSON file.
@@ -137,6 +145,7 @@ class parameters():
v['darkrun'] = self.darkrun
v['run'] = self.run
v['module'] = self.module
v['gain'] = self.gain
v['mask'] = self.mask_idx
v['mean_th'] = self.mean_th
@@ -168,7 +177,7 @@ class parameters():
"""
with open(fname, 'r') as f:
v = json.load(f)
c = cls(v['proposal'], v['darkrun'], v['run'], v['module'])
c = cls(v['proposal'], v['darkrun'], v['run'], v['module'], v['gain'])
c.mean_th = v['mean_th']
c.std_th = v['std_th']
@@ -188,7 +197,7 @@ class parameters():
def __str__(self):
f = f'proposal:{self.proposal} darkrun:{self.darkrun} run:{self.run}'
f += f' module:{self.module}\n'
f += f' module:{self.module} gain:{self.gain} ph/bin\n'
if self.mask_idx is not None:
f += f'mean threshold:{self.mean_th} std threshold:{self.std_th}\n'
@@ -337,16 +346,16 @@ def find_rois(data_mean, threshold):
rois: dictionary of rois
"""
# compute vertical and horizontal projection
pX = data_mean.sum(axis=0)
pX = data_mean.mean(axis=0)
pX = pX[:256] # half the ladder since there is a gap in the middle
pY = data_mean.sum(axis=1)
pY = data_mean.mean(axis=1)
# along X
lowX = int(np.argmax(pX[:128] > threshold)) # 1st occurrence returned
highX = int(np.argmax(pX[128:] < threshold) + 128) # 1st occ. returned
midX = (lowX + highX)//2
leftX = int(np.argmin(pX[(lowX+20):midX]) + lowX + 20)
rightX = int(np.argmin(pX[midX:highX-20]) + midX)
lowX = int(np.argmax(pX[:64] > threshold)) # 1st occurrence returned
highX = int(np.argmax(pX[192:] <= threshold) + 192) # 1st occ. returned
leftX = int(np.argmin(pX[64:128]) + 64)
rightX = int(np.argmin(pX[128:192]) + 128)
# along Y
lowY = int(np.argmax(pY[:64] > threshold)) # 1st occurrence returned
@@ -429,9 +438,9 @@ def inspect_rois(data_mean, rois, threshold=None, allrois=False):
matplotlib figure
"""
# compute vertical and horizontal projection
pX = data_mean.sum(axis=0)
pX = data_mean.mean(axis=0)
pX = pX[:256] # half the ladder since there is a gap in the middle
pY = data_mean.sum(axis=1)
pY = data_mean.mean(axis=1)
# Set up the axes with gridspec
fig = plt.figure(figsize=(5, 3))
@@ -451,6 +460,23 @@ def inspect_rois(data_mean, rois, threshold=None, allrois=False):
vmax=np.percentile(data_mean[:, :256], 99))
main_ax.set_aspect('equal')
from matplotlib.patches import Rectangle
roi = rois['n']
main_ax.add_patch(Rectangle((roi['xl'], 128-roi['yh']),
roi['xh'] - roi['xl'],
roi['yh'] - roi['yl'],
alpha=0.3, color='b'))
roi = rois['0']
main_ax.add_patch(Rectangle((roi['xl'], 128-roi['yh']),
roi['xh'] - roi['xl'],
roi['yh'] - roi['yl'],
alpha=0.3, color='g'))
roi = rois['p']
main_ax.add_patch(Rectangle((roi['xl'], 128-roi['yh']),
roi['xh'] - roi['xl'],
roi['yh'] - roi['yl'],
alpha=0.3, color='r'))
x.plot(Xs, pX)
x.invert_yaxis()
if threshold is not None:
@@ -471,62 +497,58 @@ def inspect_rois(data_mean, rois, threshold=None, allrois=False):
return fig
def histogram_module(proposalNB, runNB, moduleNB, mask=None):
def histogram_module(arr, mask=None):
"""Compute a histogram of the 9 bits raw pixel values over a module.
Inputs
------
proposalNB: proposal number
runNB: run number
moduleNB: module number
arr: dask array of reshaped dssc data (trainId, pulseId, x, y)
mask: optional bad pixel mask
Returns
-------
histogram
"""
run = open_run(proposal=proposalNB, run=runNB)
# DSSC
source = f'SCS_DET_DSSC1M-1/DET/{moduleNB}CH0:xtdf'
key = 'image.data'
arr = run[source, key].dask_array()
arr = arr.rechunk((100, -1, -1, -1))
if mask is not None:
w = da.repeat(da.repeat(da.array(mask[None, None, :, :]),
arr.shape[1], axis=1), arr.shape[0], axis=0)
w = w.rechunk((100, -1, -1, -1))
return da.bincount(arr.ravel(), w.ravel()).compute()
return da.bincount(arr.ravel(), w.ravel(), minlength=512).compute()
else:
return da.bincount(arr.ravel()).compute()
return da.bincount(arr.ravel(), minlength=512).compute()
def inspect_histogram(proposalNB, runNB, moduleNB,
mask=None, extra_lines=False):
def inspect_histogram(arr, arr_dark=None, mask=None, extra_lines=False):
"""Compute and plot a histogram of the 9 bits raw pixel values.
Inputs
------
proposalNB: proposal number
runNB: run number
moduleNB: module number
arr: dask array of reshaped dssc data (trainId, pulseId, x, y)
arr_dark: dask array of reshaped dssc dark data (trainId, pulseId, x, y)
mask: optional bad pixel mask
extra_lines: boolean, default False, plot extra lines at period values
Returns
-------
histogram
(h, hd): histogram of arr, arr_dark
figure
"""
h = histogram_module(proposalNB, runNB, moduleNB, mask=mask)
from matplotlib.ticker import MultipleLocator
f = plt.figure(figsize=(6, 3))
plt.plot(np.arange(2**9), h, marker='o',
h = histogram_module(arr, mask=mask)
Sum_h = np.sum(h)
plt.plot(np.arange(2**9), h/Sum_h, marker='o',
ms=3, markerfacecolor='none', lw=1)
if arr_dark is not None:
hd = histogram_module(arr_dark, mask=mask)
Sum_hd = np.sum(hd)
plt.plot(np.arange(2**9), hd/Sum_hd, marker='o',
ms=3, markerfacecolor='none', lw=1, c='k', alpha=.5)
else:
hd = None
if extra_lines:
for k in range(50, 271):
if not (k - 2) % 8:
@@ -537,21 +559,18 @@ def inspect_histogram(proposalNB, runNB, moduleNB,
plt.axvline(k, c='r', alpha=0.3, ls='--')
plt.axvline(271, c='C1', alpha=0.5, ls='--')
plt.fill_between(np.arange(2**9)[30:51], h[30:51], 1, alpha=0.5)
plt.ylim([1, None])
plt.xlim([0, 2**9-1])
plt.yscale('log')
plt.axes().xaxis.set_minor_locator(MultipleLocator(10))
plt.xlabel('DSSC pixel value')
plt.ylabel('counts')
plt.title(f'p{proposalNB} run {runNB}')
plt.ylabel('count frequency')
return h, f
return (h, hd), f
def load_dssc_module(proposalNB, runNB, moduleNB=15,
subset=slice(None), drop_intra_darks=True, persist=True):
subset=slice(None), drop_intra_darks=True, persist=False):
"""Load single module dssc data as dask array.
Inputs
@@ -561,7 +580,7 @@ def load_dssc_module(proposalNB, runNB, moduleNB=15,
moduleNB: default 15, module number
subset: default slice(None), subset of trains to load
drop_intra_darks: boolean, default True, remove intra darks from the data
persist: load all data in memory
persist: default False, load all data persistently in memory
Returns
-------
@@ -579,6 +598,8 @@ def load_dssc_module(proposalNB, runNB, moduleNB=15,
arr[arr == 0] = 256
ppt = run[source, key][subset].data_counts()
# ignore trains with no pulses, which can happen in burst mode acquisition
ppt = ppt[ppt > 0]
tid = ppt.index.to_numpy()
ppt = np.unique(ppt)
@@ -795,26 +816,24 @@ def inspect_plane_fitting(avg, rois, vmin=None, vmax=None):
fig, axs = plt.subplots(2, 3, sharex=True, figsize=(6, 6))
img_rois = {}
centers = {}
for k, r in enumerate(['n', '0', 'p']):
img_rois[r] = avg[rois[r]['yl']:rois[r]['yh'],
rois[r]['xl']:rois[r]['xh']]
roi = rois[r]
centers[r] = np.array([(roi['yl'] + roi['yh'])//2,
(roi['xl'] + roi['xh'])//2])
d = '0'
roi = rois[d]
for k, r in enumerate(['n', '0', 'p']):
img_rois[r] = np.roll(avg, tuple(centers[d] - centers[r]))[
roi['yl']:roi['yh'], roi['xl']:roi['xh']]
im = axs[0, k].imshow(img_rois[r],
vmin=vmin,
vmax=vmax)
for k, r in enumerate(['n', '0', 'p']):
if img_rois[r].shape[1] != img_rois['0'].shape[1]:
if k == 0:
n1 = img_rois[r].shape[1]
n = img_rois['0'].shape[1]
v = img_rois[r][:, (n1-n):]/img_rois['0']
else:
n1 = img_rois[r].shape[1]
n = img_rois['0'].shape[1]
v = img_rois[r][:, :-(n1-n)]/img_rois['0']
else:
v = img_rois[r]/img_rois['0']
v = img_rois[r]/img_rois['0']
im2 = axs[1, k].imshow(v, vmin=0.2, vmax=1.1, cmap='RdBu_r')
cbar = fig.colorbar(im, ax=axs[0, :], orientation="horizontal")
@@ -1073,16 +1092,17 @@ def nl_crit(p, domain, alpha, arr_dark, arr, tid, rois, mask, flat_field,
# drop saturated shots
d = data.where(data['sat_sat'] == False, drop=True)
# calculated error from transmission of 1.0
v = d['n'].values.flatten()/d['0'].values.flatten()
err1 = 1e8*np.nanmean((v - 1.0)**2)
v_1 = snr(d['n'].values.flatten(), d['0'].values.flatten(),
methods=['weighted'])
err_1 = 1e8*v_1['weighted']['s']**2
err2 = np.sum((Fmodel-np.arange(2**9))**2)
v_2 = snr(d['p'].values.flatten(), d['0'].values.flatten(),
methods=['weighted'])
err_2 = 1e8*v_2['weighted']['s']**2
# print(f'{err}: {p}')
# logging.info(f'{err}: {p}')
err_a = np.sum((Fmodel-np.arange(2**9))**2)
return (1.0 - alpha)*err1 + alpha*err2
return (1.0 - alpha)*0.5*(err_1 + err_2) + alpha*err_a
def nl_fit(params, domain):
@@ -1175,44 +1195,68 @@ def inspect_nl_fit(res_fit):
return f
def snr(sig, ref, verbose=False):
""" Compute mean, std and SNR with and without weight from transmitted signal sig
and I0 signal ref
def snr(sig, ref, methods=None, verbose=False):
""" Compute mean, std and SNR from transmitted signal sig and I0 signal ref.
Inputs
------
sig: 1D signal samples
ref: 1D reference samples
methods: None by default or list of strings to select which methods to use.
Possible values are 'direct', 'weighted', 'diff'. In case of None, all
methods will be calculated.
verbose: boolean, if True prints calculated values
Returns
-------
dictionary of [methods][value] where value is 'mu' for mean and 's' for
standard deviation.
"""
if methods is None:
methods = ['direct', 'weighted', 'diff']
w = ref
x = sig/ref
mask = np.isfinite(x) & np.isfinite(sig) & np.isfinite(ref)
w = w[mask]
sig = sig[mask]
ref = ref[mask]
x = x[mask]
# direct mean and std
mu = np.mean(x)
s = np.std(x)
if verbose:
print(f'mu: {mu}, s: {s}, snr: {mu/s}')
res = {}
res['direct'] = {'mu': mu, 's':s}
# direct mean and std
if 'direct' in methods:
mu = np.mean(x)
s = np.std(x)
if verbose:
print(f'mu: {mu}, s: {s}, snr: {mu/s}')
res['direct'] = {'mu': mu, 's':s}
# weighted mean and std
wmu = np.sum(sig)/np.sum(ref)
v1 = np.sum(w)
v2 = np.sum(w**2)
ws = np.sqrt(np.sum(w*(x - wmu)**2)/(v1 - v2/v1))
if 'weighted' in methods:
wmu = np.sum(sig)/np.sum(ref)
v1 = np.sum(w)
v2 = np.sum(w**2)
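# unbiased weighted standard deviation with reliability weights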
ws = np.sqrt(np.sum(w*(x - wmu)**2)/(v1 - v2/v1))
if verbose:
print(f'weighted mu: {wmu}, s: {ws}, snr: {wmu/ws}')
if verbose:
print(f'weighted mu: {wmu}, s: {ws}, snr: {wmu/ws}')
res['weighted'] = {'mu': wmu, 's':ws}
res['weighted'] = {'mu': wmu, 's':ws}
# noise from diff
dmu = np.mean(x)
ds = np.std(np.diff(x))/np.sqrt(2)
if verbose:
print(f'diff mu: {dmu}, s: {ds}, snr: {dmu/ds}')
res['diff'] = {'mu': dmu, 's':ds}
if 'diff' in methods:
dmu = np.mean(x)
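# successive differences of uncorrelated noise have twice the variance, hence /sqrt(2)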
ds = np.std(np.diff(x))/np.sqrt(2)
if verbose:
print(f'diff mu: {dmu}, s: {ds}, snr: {dmu/ds}')
res['diff'] = {'mu': dmu, 's':ds}
return res
@@ -1239,13 +1283,22 @@ def inspect_correction(params, gain=None):
fitrois[k] = params.rois[k]
# flat flat_field
ff = compute_flat_field_correction(params.rois, params.get_flat_field())
plane_ff = params.get_flat_field()
if plane_ff is None:
plane_ff = [0.0, 0.0, 1.0, -1.0]
ff = compute_flat_field_correction(params.rois, plane_ff)
# non linearities
Fnl = params.get_Fnl()
if Fnl is None:
Fnl = np.arange(2**9)
# compute all levels of correction
data = process(np.arange(2**9), params.arr_dark, params.arr, params.tid,
fitrois, params.get_mask(), np.ones_like(ff), params.sat_level)
data_ff = process(np.arange(2**9), params.arr_dark, params.arr, params.tid,
fitrois, params.get_mask(), ff, params.sat_level)
data_ff_nl = process(params.get_Fnl(), params.arr_dark, params.arr,
data_ff_nl = process(Fnl, params.arr_dark, params.arr,
params.tid, fitrois, params.get_mask(), ff, params.sat_level)
# for conversion to nb of photons
@@ -1256,7 +1309,7 @@ def inspect_correction(params, gain=None):
scale = 1e-6
f, axs = plt.subplots(3, 3, figsize=(6, 6), sharex=True, sharey=True)
f, axs = plt.subplots(3, 3, figsize=(8, 6), sharex=True)
# nbins = np.linspace(0.01, 1.0, 100)
@@ -1278,16 +1331,11 @@ def inspect_correction(params, gain=None):
snr_v = snr(good_d[n].values.flatten(),
good_d[r].values.flatten(), verbose=True)
if k == 0:
m = np.nanmean(good_d[n].values.flatten()
/good_d[r].values.flatten())
else:
m = 1
m = snr_v['direct']['mu']
h, xedges, yedges, img = axs[l, k].hist2d(
g*scale*good_d[r].values.flatten(),
good_d[n].values.flatten()/good_d[r].values.flatten()/m,
[photon_scale, np.linspace(0.95, 1.05, 150)],
good_d[n].values.flatten()/good_d[r].values.flatten(),
[photon_scale, np.linspace(0.95, 1.05, 150)*m],
cmap='Blues',
vmax=200,
norm=LogNorm(),
@@ -1295,13 +1343,14 @@ def inspect_correction(params, gain=None):
)
h, xedges, yedges, img2 = axs[l, k].hist2d(
g*scale*sat_d[r].values.flatten(),
sat_d[n].values.flatten()/sat_d[r].values.flatten()/m,
[photon_scale, np.linspace(0.95, 1.05, 150)],
sat_d[n].values.flatten()/sat_d[r].values.flatten(),
[photon_scale, np.linspace(0.95, 1.05, 150)*m],
cmap='Reds',
vmax=200,
norm=LogNorm(),
# alpha=0.5 # make the plot looks ugly with lots of white lines
)
v = snr_v['direct']['mu']/snr_v['direct']['s']
axs[l, k].text(0.4, 0.15, f'SNR: {v:.0f}',
transform = axs[l, k].transAxes)
@@ -1312,9 +1361,11 @@ def inspect_correction(params, gain=None):
# axs[l, k].plot(3*nbins, 1+np.sqrt(2/(1e6*nbins)), c='C1', ls='--')
# axs[l, k].plot(3*nbins, 1-np.sqrt(2/(1e6*nbins)), c='C1', ls='--')
axs[l, k].set_ylim([0.95*m, 1.05*m])
for k in range(3):
for l in range(3):
axs[l, k].set_ylim([0.95, 1.05])
#for l in range(3):
# axs[l, k].set_ylim([0.95, 1.05])
if gain:
axs[2, k].set_xlabel('#ph (10$^6$)')
else: