Commit 96ac4137 authored by Cyril Danilevski

Remove duplicate operation_mode parameter in FastCCD notebook

parent d5898d20
%% Cell type:markdown id: tags:
# FastCCD Dark Characterization
Author: I. Klačková, S. Hauf, K. Setoodehnia and M. Cascella
The following notebook provides dark image analysis of the FastCCD detector.
Dark characterization evaluates offset and noise of the FastCCD detector, corrects the noise for Common Mode (CM), and defines bad pixels relative to the offset and CM corrected noise. Bad pixels are then excluded and the CM corrected noise is recalculated excluding the bad pixels. The resulting offset and CM corrected noise maps, as well as the bad pixel map, are sent to the calibration database.
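As a rough orientation before diving into the XFELDetAna-based pipeline, the whole procedure can be sketched in plain NumPy. This is a minimal, self-contained illustration on simulated frames with simplified statistics, not the notebook's actual implementation:
%% Cell type:code id: tags:
``` python
# Minimal NumPy sketch of the dark-characterization logic (simulated data, illustration only):
import numpy as np

rng = np.random.default_rng(42)
darks = rng.normal(4000., 6., size=(64, 48, 500)).astype(np.float32)  # (rows, columns, frames)

offset_map = darks.mean(axis=2)                   # per-pixel mean -> offset map
noise_map = darks.std(axis=2)                     # per-pixel standard deviation -> noise map

corrected = darks - offset_map[:, :, None]        # offset correction
cm = np.median(corrected, axis=1, keepdims=True)  # common mode: median over each row, per frame
corrected -= cm                                   # common mode correction
noise_map_cm = corrected.std(axis=2)              # CM corrected noise map

# Bad pixels: offset or CM corrected noise beyond 5 sigma of the respective median
bad = (np.abs(offset_map - np.median(offset_map)) > 5 * offset_map.std()) | \
      (np.abs(noise_map_cm - np.median(noise_map_cm)) > 5 * noise_map_cm.std())
print("Fraction of bad pixels: {:.4f}".format(bad.mean()))
```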
%% Cell type:code id: tags:
``` python
cluster_profile = "noDB" # ipcluster profile to use
in_folder = "/gpfs/exfel/exp/SCS/201930/p900074/raw" # input folder, required
out_folder = '/gpfs/exfel/data/scratch/ahmedk/test/fastccd' # output folder, required
sequence = 0 # sequence file to use
run = 351 # which run to read data from, required
karabo_da = ['DA05'] # data aggregators
karabo_id = "SCS_CDIDET_FCCD2M" # karabo prefix of FastCCD devices
receiver_id = "FCCD" # inset for receiver devices
path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data
h5path = '/INSTRUMENT/{}/DAQ/{}:daqOutput/data/image/pixels' # path to the data in the HDF5 file
h5path_t = '/CONTROL/{}/CTRL/LSLAN/inputA/crdg/value' # path to find temperature
h5path_cntrl = '/RUN/{}/DET/FCCD' # path to find control data
use_dir_creation_date = True # To be used to retrieve calibration constants later on (for database time derivation)
cal_db_interface = "tcp://max-exfl016:8020" # the calibration database interface to use
cal_db_timeout = 300000 # timeout on calibration database requests
db_output = False # Output constants to the calibration database
local_output = True # output constants locally
number_dark_frames = 0 # number of images to be used; if set to 0, all available images are used
# The two operation modes for FastCCD have fixed names which cannot be changed:
operation_mode = "FF" # FS stands for frame-store and FF for full-frame operation.
temp_limits = 5 # to find calibration constants later on, the sensor temperature is allowed to vary by 5 units
bad_pixel_offset_sigma = 5. # Any pixel whose offset is beyond 5 standard deviations is a bad pixel
bad_pixel_noise_sigma = 5. # Any pixel whose noise is beyond 5 standard deviations is a bad pixel
sigmaNoise = 5. # Any pixel whose signal exceeds 'sigmaNoise'*noiseCM (common mode corrected noise) will be masked
fix_temperature = 0. # Fixed operation temperature in Kelvins. If set to 0, the mean temperature of the data file is used.
chunkSize = 100 # Number of images to read per chunk
cpuCores = 40 # Specifies the number of running cpu cores
commonModeAxis = 1 # Axis along which common mode will be calculated (0: along rows, 1: along columns)
run_parallel = True # For parallel computation
# According to our gain calibration using a 55Fe source, we have the following conversion gains (e.g., 1 ADU = 6.3 e-):
ADU_to_electron_upper_hg = 6.3 # for upper hemisphere and high gain
ADU_to_electron_lower_hg = 6.4 # for lower hemisphere and high gain
ADU_to_electron_upper_mg = 23.4 # for upper hemisphere and medium gain
ADU_to_electron_lower_mg = 23.4 # for lower hemisphere and medium gain
ADU_to_electron_upper_lg = 49.3 # for upper hemisphere and low gain
ADU_to_electron_lower_lg = 47.3 # for lower hemisphere and low gain
```
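As a quick, hypothetical illustration of how these conversion factors are meant to be applied (electrons = ADU × factor), e.g. for the upper hemisphere in high gain:
%% Cell type:code id: tags:
``` python
# Hypothetical example: converting a noise value from ADU to electrons with the
# high-gain factor for the upper hemisphere defined above (1 ADU = 6.3 e-):
noise_adu = 4.2  # example value in ADU, not taken from real data
print("{:.1f} ADU = {:.1f} e-".format(noise_adu, noise_adu * ADU_to_electron_upper_hg))  # 4.2 ADU = 26.5 e-
```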
%% Cell type:code id: tags:
``` python
# Required Packages:
import copy
import datetime
import time
import os
import warnings
warnings.filterwarnings('ignore')
import h5py
from IPython.display import display, Markdown
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from prettytable import PrettyTable
from cal_tools.tools import (get_dir_creation_date, get_random_db_interface,
                             get_pdu_from_db, get_report,
                             save_const_to_h5, send_to_db)
from cal_tools.enums import BadPixels
from iCalibrationDB import Constants, Conditions, Detectors, Versions
from iCalibrationDB.detectors import DetectorTypes
from XFELDetAna import xfelpyanatools as xana
from XFELDetAna import xfelpycaltools as xcal
from XFELDetAna.detectors.fastccd import readerh5 as fastccdreaderh5
from XFELDetAna.util import env
env.iprofile = cluster_profile
import XFELDetAna.xfelprofiler as xprof
profiler = xprof.Profiler()
profiler.disable()
from XFELDetAna.xfelreaders import ChunkReader
```
%% Cell type:code id: tags:
``` python
# Output Folder Creation:
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
```
%% Cell type:code id: tags:
``` python
# Number of Images:
def nImagesOrLimit(nImages, limit):
    if limit == 0:
        return nImages
    else:
        return min(nImages, limit)
```
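A quick sanity check of the helper with hypothetical values (a limit of 0 means all available images are used):
%% Cell type:code id: tags:
``` python
assert nImagesOrLimit(500, 0) == 500    # limit of 0: keep all 500 images
assert nImagesOrLimit(500, 100) == 100  # otherwise cap at the limit
```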
%% Cell type:code id: tags:
``` python
proposal = list(filter(None, in_folder.strip('/').split('/')))[-2]
file_loc = 'proposal:{} runs:{}'.format(proposal, run)
report = get_report(out_folder)
```
%% Cell type:code id: tags:
``` python
# Detector Operation Mode, Calibration Database Settings, and Some Initial Run Parameters & Paths:
display(Markdown('### Initial Settings'))
if operation_mode == "FS":
    x = 960  # rows of the FastCCD to analyze in FS mode
    y = 960  # columns of the FastCCD to analyze in FS mode
    print('\nYou are analyzing data in FS mode.')
else:
    x = 1934  # rows of the FastCCD to analyze in FF mode
    y = 960   # columns of the FastCCD to analyze in FF mode
    print('\nYou are analyzing data in FF mode.')
ped_dir = "{}/r{:04d}".format(in_folder, run)
fp_name = path_template.format(run, karabo_da[0])
fp_path = '{}/{}'.format(ped_dir, fp_name)
filename = fp_path.format(sequence)
h5path = h5path.format(karabo_id, receiver_id)
h5path_t = h5path_t.format(karabo_id)
h5path_cntrl = h5path_cntrl.format(karabo_id)
creation_time = None
if use_dir_creation_date:
    creation_time = get_dir_creation_date(in_folder, run)
cal_db_interface = get_random_db_interface(cal_db_interface)
print('Calibration database interface: {}'.format(cal_db_interface))
print("Sending constants to the calibration database: {}".format(db_output))
print("HDF5 path to data: {}".format(h5path))
print("Run number: {}".format(run))
print("Reading data from: {}".format(filename))
if creation_time:
    print("Using {} as creation time".format(creation_time.isoformat()))
```
%% Cell type:code id: tags:
``` python
# Reading Parameters such as Detector Bias, Gain, etc. from the Data:
memoryCells = 1  # FastCCD has 1 memory cell
sensorSize = [x, y]
blockSize = [sensorSize[0]//2, sensorSize[1]]  # Sensor area will be analysed according to blockSize
xcal.defaultBlockSize = blockSize
nImages = fastccdreaderh5.getDataSize(filename, h5path)[0]  # Total number of images to process
nImages = nImagesOrLimit(nImages, number_dark_frames)
profile = False
with h5py.File(filename, 'r') as f:
    bias_voltage = int(f['{}/biasclock/bias/value'.format(h5path_cntrl)][0])
    det_gain = int(f['{}/exposure/gain/value'.format(h5path_cntrl)][0])
    integration_time = int(f['{}/exposure/exposure_time/value'.format(h5path_cntrl)][0])
    temperature = np.mean(f[h5path_t])
    temperature = round(temperature, 2)
```
%% Cell type:code id: tags:
``` python
# Printing the Parameters Read from the Data File:
display(Markdown('### Evaluated Parameters'))
print("Number of dark images to analyze:", nImages)
gain_dict = {
    "high gain": 8,
    "medium gain": 2,
    "low gain": 1,
    "auto gain": 0
}
for gain, value in gain_dict.items():
    if det_gain == value:
        gain_setting = gain
print("Bias voltage is {} V.".format(bias_voltage))
print("Detector gain is set to x{} ({}).".format(det_gain, gain_setting))
print("Detector integration time is set to {} ms.".format(integration_time))
if fix_temperature != 0.:
    print("Using a fixed temperature of {} K".format(fix_temperature))
else:
    # This is needed while sending the
    # calibration constant to the DB later
    fix_temperature = temperature + 273.15
    print("Temperature is not fixed.")
    print("Mean temperature was {:0.2f} °C / {:0.2f} K".format(temperature, fix_temperature))
```
%% Cell type:code id: tags:
``` python
# Reading Files in Chunks:
# The chunk reader returns an iterator to access the data in the file within the given ranges:
reader = ChunkReader(filename, fastccdreaderh5.readData, nImages, chunkSize, path=h5path,
                     pixels_x=sensorSize[0], pixels_y=sensorSize[1])
```
%% Cell type:code id: tags:
``` python
# Calculator:
# noiseCal is a noise map calculator, which internally also produces a per-pixel mean map, i.e. an offset map:
noiseCal = xcal.NoiseCalculator(sensorSize, memoryCells, cores=cpuCores, blockSize=blockSize, runParallel=run_parallel)
```
%% Cell type:markdown id: tags:
### First Iteration
Characterizing dark images to create dark maps (offset, noise and bad pixel maps) is an iterative process. First, initial offset and noise maps are produced from the raw dark data.
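In the loops below, each raw 16-bit word is masked with `0b0011111111111111` so that only the lower 14 ADC bits enter the statistics; the two most significant bits carry per-pixel gain information on the FastCCD. A small, self-contained illustration of that masking, with hypothetical raw words:
%% Cell type:code id: tags:
``` python
# The lower 14 bits of each raw 16-bit word hold the ADC value; the two MSBs are gain bits.
import numpy as np

raw = np.array([0b1100000000001010, 0b0100001111101000], dtype=np.uint16)  # hypothetical raw words
adc = np.bitwise_and(raw, 0b0011111111111111)  # keep only the 14 ADC bits
gain_bits = raw >> 14                          # the stripped gain information
print(adc)        # [  10 1000]
print(gain_bits)  # [3 1]
```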
%% Cell type:code id: tags:
``` python
counter1 = 1  # To count how many "if data.shape[2] >= chunkSize" instances occur.
counter2 = 0  # To count how many "if data.shape[2] < chunkSize" instances occur.
chunkSize_new = 0  # See below
images = 0  # Total number of images processed so far
for data in reader.readChunks():
    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)
    dx = np.count_nonzero(data, axis=(0, 1))
    data = data[:, :, dx != 0]
    # Some sequences may have fewer than 500 frames in them. To find out how many images there are, we temporarily
    # change chunkSize to the number of frames in the last chunk of data:
    if data.shape[2] < chunkSize:
        chunkSize_new = data.shape[2]
        print("Number of images is less than chunkSize. chunkSize is temporarily changed to {}."
              .format(chunkSize_new))
        images = images + chunkSize_new
        counter2 += 1
    else:
        images = counter1*chunkSize + counter2*chunkSize_new
        counter1 += 1
    noiseCal.fill(data)  # Filling calculators with data
chunkSize = 100  # Resetting the chunkSize to its default value for the next sequence or data-chunk
print('A total of {} images were processed.'.format(images))
```
%% Cell type:code id: tags:
``` python
offsetMap = noiseCal.getOffset()  # Producing offset map
noiseMap = noiseCal.get()         # Producing noise map
noiseCal.reset()                  # Resetting noise calculator
print("Initial maps are created.")
```
%% Cell type:markdown id: tags:
### Offset and Noise Maps prior to Common Mode Correction
In the following, the histogram of the FastCCD offset, the FastCCD offset map, and the initial uncorrected noise map are plotted:
%% Cell type:code id: tags:
``` python
#************** OFFSET MAP HISTOGRAM ***********#
ho, co = np.histogram(offsetMap.flatten(), bins=700)  # ho = offset histogram; co = offset bin edges
do = {'x': co[:-1],
      'y': ho,
      'y_err': np.sqrt(ho[:]),
      'drawstyle': 'bars',
      'color': 'cornflowerblue',
      'label': 'Raw Signal (ADU)'
      }
fig = xana.simplePlot(do, figsize='1col', aspect=1, x_label='Raw Signal (ADU)', y_label="Counts",
                      x_range=(3400, 4400), title='Offset Histogram')
#fig.savefig('Offset_Hist.svg', format='svg', dpi=1200, bbox_inches='tight')
t0 = PrettyTable()
t0.title = "Raw Signal"
t0.field_names = ["Mean", "Median", "Standard Deviation"]
t0.add_row(["{:0.3f} (ADU)".format(np.mean(data)), "{:0.3f} (ADU)".format(np.median(data)), "{:0.3f} (ADU)"
            .format(np.std(data))])
print(t0, '\n')
#************** OffsetMAP *******************#
fig = xana.heatmapPlot(offsetMap[:, :, 0], x_label='Column Number', y_label='Row Number', aspect=1,
                       x_range=(0, y), y_range=(0, x), vmin=3000, vmax=4300, lut_label='Offset (ADU)',
                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)',
                       panel_top_low_lim=3000, panel_top_high_lim=4500, panel_side_low_lim=3000,
                       panel_side_high_lim=5000, title='OffsetMap')
#fig.savefig('RawOffsetMap.pdf', format='pdf', dpi=400, bbox_inches='tight')
#************** Raw NoiseMAP *******************#
fig = xana.heatmapPlot(noiseMap[:, :, 0], x_label='Column Number', y_label='Row Number', aspect=1,
                       lut_label='Uncorrected Noise (ADU)', x_range=(0, y),
                       y_range=(0, x), vmax=2*np.mean(noiseMap), panel_x_label='Columns Stat (ADU)',
                       panel_y_label='Rows Stat (ADU)', panel_top_low_lim=0, panel_top_high_lim=20,
                       panel_side_low_lim=0, panel_side_high_lim=50, title='Uncorrected NoiseMap')
#fig.savefig('RawNoiseMap.pdf', format='pdf', dpi=400, bbox_inches='tight')
```
%% Cell type:code id: tags:
``` python
# Offset Correction:
offsetCorrection = xcal.OffsetCorrection(sensorSize, offsetMap, nCells=memoryCells, cores=cpuCores, gains=None,
                                         runParallel=run_parallel, blockSize=blockSize)
offsetCorrection.debug()
# Common Mode Correction:
# This is the new method, subtracting the median of all pixels that are read out at the same time along a row:
cmCorrection = xcal.CommonModeCorrection([data.shape[0], data.shape[1]], [data.shape[0]//2, data.shape[1]],
                                         commonModeAxis, parallel=run_parallel, dType=np.float32, stride=10,
                                         noiseMap=noiseMap.astype(np.float32), minFrac=0)
cmCorrection.debug()
```
%% Cell type:code id: tags:
``` python
# Histogram Calculators:
# For offset corrected data:
histCalCorrected = xcal.HistogramCalculator(sensorSize, bins=400, range=[-200, 200], memoryCells=memoryCells,
                                            cores=cpuCores, gains=None, blockSize=blockSize)
# For common mode corrected data:
histCalCMCorrected = xcal.HistogramCalculator(sensorSize, bins=400, range=[-200, 200], memoryCells=memoryCells,
                                              cores=cpuCores, gains=None, blockSize=blockSize)
```
%% Cell type:markdown id: tags:
### Second Iteration
During the second iteration, the data are offset corrected and then common mode corrected to produce a common mode corrected noise map. The common mode correction is calculated by subtracting the median of all pixels that are read out at the same time along a row.
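A minimal NumPy sketch of this median-based common mode correction on simulated, offset-corrected frames (the notebook itself uses `xcal.CommonModeCorrection`; the array sizes here are made up):
%% Cell type:code id: tags:
``` python
import numpy as np

rng = np.random.default_rng(0)
frames = rng.normal(0., 6., size=(100, 96, 10)).astype(np.float32)  # simulated offset-corrected frames
frames += rng.normal(0., 3., size=(100, 1, 10)).astype(np.float32)  # inject a per-row common mode signal

# Subtract, per frame, the median of all pixels read out together along a row:
cm = np.median(frames, axis=1, keepdims=True)  # shape (100, 1, 10)
frames_cm = frames - cm
print("Noise before/after CM correction: {:.2f} / {:.2f}".format(frames.std(), frames_cm.std()))
```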
%% Cell type:code id: tags:
``` python
counter1 = 1  # To count how many "if data.shape[2] >= chunkSize" instances occur.
counter2 = 0  # To count how many "if data.shape[2] < chunkSize" instances occur.
chunkSize_new = 0  # See below
images = 0  # Total number of images processed so far
for data in reader.readChunks():
    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)
    dx = np.count_nonzero(data, axis=(0, 1))
    data = data[:, :, dx != 0]
    # Some sequences may have fewer than 500 frames in them. To find out how many images there are, we temporarily
    # change chunkSize to the number of frames in the last chunk of data:
    if data.shape[2] < chunkSize:
        chunkSize_new = data.shape[2]
        print("Number of images is less than chunkSize. chunkSize is temporarily changed to {}."
              .format(chunkSize_new))
        images = images + chunkSize_new
        counter2 += 1
    else:
        images = counter1*chunkSize + counter2*chunkSize_new
        counter1 += 1
    data = offsetCorrection.correct(data)  # Offset correction
    offset_corr_data = copy.copy(data)  # Copied so it is accessible for the table below
    histCalCorrected.fill(data)
    cellTable = np.zeros(data.shape[2], np.int32)  # cell table needed by the CM correction
    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable)  # Common mode correction
    histCalCMCorrected.fill(data)
    noiseCal.fill(data)  # Filling the noise calculator with common mode (CM) corrected data
chunkSize = 100  # Resetting the chunkSize to its default value for the next sequence or data-chunk
print('A total of {} images were processed.'.format(images))
print("Offset and common mode corrections are applied.")
```
%% Cell type:code id: tags:
``` python
noiseMapCM = noiseCal.get()  # Produces CM corrected noise map
ho, eo, co, so = histCalCorrected.get()
hCM, eCM, cCM, sCM = histCalCMCorrected.get()
```
%% Cell type:code id: tags:
``` python
# We are copying these so that we can replot them later after the calculators are reset:
ho_second_trial = copy.copy(ho)
co_second_trial = copy.copy(co)
hCM_second_trial = copy.copy(hCM)
cCM_second_trial = copy.copy(cCM)
```
%% Cell type:markdown id: tags:
### Signal after Offset and Common Mode Corrections
Here, the offset corrected signal is compared to the common mode corrected signal (in the form of binned histograms):
%% Cell type:code id: tags:
``` python
do = [{'x': co,
       'y': ho,
       'y_err': np.sqrt(ho[:]),
       'drawstyle': 'steps-mid',
       'color': 'cornflowerblue',
       'label': 'Offset Corrected Signal'
       },
      {'x': cCM,
       'y': hCM,
       'y_err': np.sqrt(hCM[:]),
       'drawstyle': 'steps-mid',
       'color': 'red',
       'label': 'Common Mode Corrected Signal'
       }]
fig = xana.simplePlot(do, figsize='2col', aspect=1, x_label='Corrected Signal (ADU)', y_label="Counts",
                      x_range=(-20, 20), legend='top-right-frame-1col', title='Corrected Signal - 2nd Iteration')
#fig.savefig('Corrected_Signal_Hist_1.svg', format='svg', dpi=1200, bbox_inches='tight')
t0 = PrettyTable()
t0.title = "Comparison of the First Round of Corrections - Bad Pixels Not Excluded"
t0.field_names = ["After Offset Correction", "After Common Mode Correction"]
t0.add_row(["Mean: {:0.3f} (ADU)".format(np.mean(offset_corr_data)), "Mean: {:0.3f} (ADU)".format(np.mean(data))])
t0.add_row(["Median: {:0.3f} (ADU)".format(np.median(offset_corr_data)), "Median: {:0.3f} (ADU)"
            .format(np.median(data))])
t0.add_row(["Standard Deviation: {:0.3f} (ADU)".format(np.std(offset_corr_data)), "Standard Deviation: {:0.3f} (ADU)"
            .format(np.std(data))])
print(t0, '\n')
```
%% Cell type:markdown id: tags:
### Noise Map after Common Mode Correction
In the following, the effect of the common mode correction on the noise is shown. Finally, the common mode corrected noise map (noiseMapCM) is displayed and compared to the initial uncorrected noise map:
%% Cell type:code id: tags:
``` python
#*****NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA*******#
hn, cn = np.histogram(noiseMap.flatten(), bins=200, range=(0, 40))  # hn: histogram of noise, cn: bin edges for noise
hn_CM, cn_CM = np.histogram(noiseMapCM.flatten(), bins=200, range=(0, 40))
dn = [{'x': cn[:-1],
       'y': hn,
       'drawstyle': 'steps-mid',
       'color': 'blue',
       'label': 'Uncorrected Noise'
       },
      {'x': cn_CM[:-1],
       'y': hn_CM,
       'drawstyle': 'steps-mid',
       'color': 'crimson',
       'label': 'Common Mode Corrected Noise'
       }]
fig = xana.simplePlot(dn, figsize='2col', aspect=1, x_label='Noise (ADU)', y_label="Counts",
                      x_range=(0, 40), y_range=(0, 1e6), y_log=True, legend='top-center-frame-1col',
                      title='Noise Comparison')
#fig.savefig('Noise_CM_1_Hist.svg', format='svg', dpi=1200, bbox_inches='tight')
fig = xana.heatmapPlot(noiseMapCM[:, :, 0], aspect=1, x_label='Column Number', y_label='Row Number',
                       lut_label='Common Mode Corrected Noise (ADU)', x_range=(0, y), y_range=(0, x),
                       vmax=2*np.mean(noiseMapCM), panel_top_low_lim=0, panel_top_high_lim=20,
                       panel_side_low_lim=0, panel_side_high_lim=50, title='Common Mode Corrected Noise',
                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)')
#fig.savefig('NoiseMapCM.pdf', format='pdf', dpi=400, bbox_inches='tight')
```
%% Cell type:code id: tags:
``` python
# Resetting the calculators so we can do a third iteration later:
noiseCal.reset()
histCalCorrected.reset()
histCalCMCorrected.reset()
cmCorrection.reset()
```
%% Cell type:markdown id: tags:
### Initial BadPixelMap
This is generated based on the offset and CM corrected noise maps:
%% Cell type:code id: tags:
``` python
bad_pixels = np.zeros(offsetMap.shape, np.uint32)
mnoffset = np.nanmedian(offsetMap)
stdoffset = np.nanstd(offsetMap)
bad_pixels[(offsetMap < mnoffset-bad_pixel_offset_sigma*stdoffset) |
           (offsetMap > mnoffset+bad_pixel_offset_sigma*stdoffset)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value
mnnoise = np.nanmedian(noiseMapCM)
stdnoise = np.nanstd(noiseMapCM)
bad_pixels[(noiseMapCM < mnnoise-bad_pixel_noise_sigma*stdnoise) |
           (noiseMapCM > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value
fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]), aspect=1, x_label='Column Number', y_label='Row Number',
                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x),
                       title='Bad Pixels Map Excluding Non-Sensitive\n Areas in Middle of CCD',
                       panel_x_label='Columns Stat', panel_y_label='Rows Stat')
```
%% Cell type:markdown id: tags:
Here, we add the pixels in the hole (the center of the FastCCD) as well as 4 rows in the center of the detector, which we call the overscan region:
%% Cell type:code id: tags:
``` python
def create_circular_mask(h, w, center=None, radius=None):
    if center is None:  # use the middle of the image
        center = [int(w/2), int(h/2)]
    if radius is None:  # use the smallest distance between the center and the image walls
        radius = min(center[0], center[1], w-center[0], h-center[1])
    Y, X = np.ogrid[:h, :w]
    dist_from_center = np.sqrt((X - center[0])**2 + (Y - center[1])**2)
    mask = dist_from_center < radius
    return mask
```
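For instance, on a small hypothetical grid the helper produces a disk of `True` values around the chosen center:
%% Cell type:code id: tags:
``` python
small = create_circular_mask(7, 7, radius=2)  # 7x7 grid, radius-2 disk around the default center
print(small.astype(int))                      # a 3x3 block of ones centered at (3, 3)
```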
%% Cell type:code id: tags:
``` python
mask = np.zeros(offsetMap.shape, np.uint32)
# Defining a circular mask + a rectangular mask (overscan) for the hole in the middle of the CCD:
h, w = (x, y)
hole_mask_bool = create_circular_mask(h-4, w, radius=61.5, center=(w//2, (h-4)//2))
hole_mask = np.zeros(hole_mask_bool.shape, np.uint32)
hole_mask[hole_mask_bool] = BadPixels.NON_SENSITIVE.value
overscan_mask = np.full((4, w), BadPixels.OVERSCAN.value)
mask[:, :, 0] = np.insert(hole_mask, (h-4)//2, overscan_mask, 0)
# Assigning this masked area as bad pixels:
bad_pixels = np.bitwise_or(bad_pixels, mask)
fig = xana.heatmapPlot(np.log2(bad_pixels[:, :, 0]), aspect=1, x_label='Column Number', y_label='Row Number',
                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x),
                       panel_top_low_lim=0, panel_top_high_lim=20, panel_side_low_lim=0, panel_side_high_lim=20,
                       title='Bad Pixels Map Including Non-Sensitive\n Areas in Middle of CCD',
                       panel_x_label='Columns Stat', panel_y_label='Rows Stat', vmax=20)
#fig.savefig('BadPixelMap_1.svg', format='svg', dpi=1200, bbox_inches='tight')
```
%% Cell type:markdown id: tags:
### Third Iteration
During the third iteration, the bad pixel map is applied to the data, and the bad pixels are masked. Offset and common mode corrections are applied once again to the data, which now have the bad pixels excluded, to produce a common mode corrected noise map:
%% Cell type:code id: tags:
``` python
# bad_pixels is an array of shape (1934, 960, 1) filled with zeros except at indices where we have the actual bad
# pixels, whose values are set to: 2 (2^1: BadPixels.OFFSET_OUT_OF_THRESHOLD.value), or
# 262144 (2^18: BadPixels.OVERSCAN.value), or 524288 (2^19: BadPixels.NON_SENSITIVE.value). These indices can be found
# using np.argwhere(bad_pixels != 0)
event_threshold = sigmaNoise*np.median(noiseMapCM)  # for exclusion of possible cosmic ray events
noiseCal.setBadPixelMask(bad_pixels != 0)
```
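Since each defect class occupies its own bit and the classes are combined with bitwise OR, the individual contributions can be counted back from the map. A short sketch using the enum values already imported above:
%% Cell type:code id: tags:
``` python
# Each flag is a single bit, so a bitwise AND isolates one defect class:
n_offset = np.count_nonzero(bad_pixels & BadPixels.OFFSET_OUT_OF_THRESHOLD.value)
n_noise = np.count_nonzero(bad_pixels & BadPixels.NOISE_OUT_OF_THRESHOLD.value)
n_dead = np.count_nonzero(bad_pixels & BadPixels.NON_SENSITIVE.value)
n_overscan = np.count_nonzero(bad_pixels & BadPixels.OVERSCAN.value)
print("Offset: {}, noise: {}, non-sensitive: {}, overscan: {}".format(n_offset, n_noise, n_dead, n_overscan))
```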
%% Cell type:code id: tags:
``` python
counter1 = 1  # To count how many "if data.shape[2] >= chunkSize" instances occur.
counter2 = 0  # To count how many "if data.shape[2] < chunkSize" instances occur.
chunkSize_new = 0  # See below
images = 0  # Total number of images processed so far
for data in reader.readChunks():
    data = np.bitwise_and(data.astype(np.uint16), 0b0011111111111111).astype(np.float32)
    dx = np.count_nonzero(data, axis=(0, 1))
    data = data[:, :, dx != 0]
    # Some sequences may have fewer than 500 frames in them. To find out how many images there are, we temporarily
    # change chunkSize to the number of frames in the last chunk of data:
    if data.shape[2] < chunkSize:
        chunkSize_new = data.shape[2]
        print("Number of images is less than chunkSize. chunkSize is temporarily changed to {}."
              .format(chunkSize_new))
        images = images + chunkSize_new
        counter2 += 1
    else:
        images = counter1*chunkSize + counter2*chunkSize_new
        counter1 += 1
    data_copy = offsetCorrection.correct(copy.copy(data))
    cellTable = np.zeros(data_copy.shape[2], np.int32)
    data_copy = cmCorrection.correct(data_copy.astype(np.float32), cellTable=cellTable)
    data[data_copy > event_threshold] = np.nan  # cosmic rays
    data = np.ma.MaskedArray(data, np.isnan(data), fill_value=0)  # masking cosmics; the default fill_value is 1e+20
    data = offsetCorrection.correct(data)
    offset_corr_data2 = copy.copy(data)  # Copied so it is accessible for the table below
    histCalCorrected.fill(data)
    cellTable = np.zeros(data.shape[2], np.int32)
    data = cmCorrection.correct(data.astype(np.float32), cellTable=cellTable)
    histCalCMCorrected.fill(data)
    noiseCal.fill(data)
chunkSize = 100  # Resetting the chunkSize to its default value for the next sequence or data-chunk
print('A total of {} images were processed.'.format(images))
print("Final iteration is performed.")
```
%% Cell type:code id: tags:
``` python
noiseMapCM_2nd = noiseCal.get().filled(0)  # the masked pixels are filled with zero
ho2, eo2, co2, so2 = histCalCorrected.get()
hCM2, eCM2, cCM2, sCM2 = histCalCMCorrected.get()
```
%% Cell type:markdown id: tags:
### Plots of the Final Results
The following plot and table compare the offset and common mode corrected signal with and without the bad pixels:
%% Cell type:code id: tags:
``` python
do_Final = [{'x': co_second_trial,
             'y': ho_second_trial,
             'y_err': np.sqrt(ho_second_trial[:]),
             'drawstyle': 'steps-mid',
             'color': 'blue',
             'errorstyle': 'bars',
             'label': 'Offset Corrected Signal, Bad Pixels Included - 2nd Trial'
             },
            {'x': cCM_second_trial,
             'y': hCM_second_trial,
             'y_err': np.sqrt(hCM_second_trial[:]),
             'drawstyle': 'steps-mid',
             'color': 'red',
             'errorstyle': 'bars',
             'ecolor': 'crimson',
             'label': 'Common Mode Corrected Signal, Bad Pixels Included - 2nd Trial'
             },
            {'x': co2,
             'y': ho2,
             'y_err': np.sqrt(ho2[:]),
             'drawstyle': 'steps-mid',
             'color': 'black',
             'errorstyle': 'bars',
             'label': 'Offset Corrected Signal, Bad Pixels Excluded - 3rd Trial'
             },
            {'x': cCM2,
             'y': hCM2,
             'y_err': np.sqrt(hCM2[:]),
             'drawstyle': 'steps-mid',
             'color': 'orange',
             'errorstyle': 'bars',
             'label': 'Common Mode Corrected Signal, Bad Pixels Excluded - 3rd Trial'
             }]
fig = xana.simplePlot(do_Final, figsize='2col', aspect=1, x_label='Corrected Signal (ADU)',
                      y_label="Counts (Logarithmic Scale)", y_log=True, x_range=(-40, 40),
                      legend='bottom-left-frame-1col', title='Comparison of Corrected Signal')
#fig.savefig('Corrected_Signal_Hist_2.svg', format='svg', dpi=1200, bbox_inches='tight')
# offset_corr_data2 and data most likely contain some NaNs => use the nanmean, nanmedian and nanstd functions:
t0 = PrettyTable()
t0.title = "Comparison of the Second Round of Corrections - Bad Pixels Excluded"
t0.field_names = ["After Offset Correction", "After Common Mode Correction"]
t0.add_row(["Mean: {:0.3f} (ADU)".format(np.nanmean(offset_corr_data2)), "Mean: {:0.3f} (ADU)".format(np.nanmean(data))])
t0.add_row(["Median: {:0.3f} (ADU)".format(np.nanmedian(offset_corr_data2)), "Median: {:0.3f} (ADU)"
            .format(np.nanmedian(data))])
t0.add_row(["Standard Deviation: {:0.3f} (ADU)".format(np.nanstd(offset_corr_data2)),
            "Standard Deviation: {:0.3f} (ADU)".format(np.nanstd(data))])
print(t0, '\n')
```
%% Cell type:markdown id: tags:
### Final NoiseMap
The effect of the exclusion of bad pixels on the common mode corrected noise is shown below. Finally, the common mode corrected noise map with bad pixels excluded (noiseMapCM_2nd) is displayed:
%% Cell type:code id: tags:
``` python
#*****NOISE MAP HISTOGRAM FROM THE COMMON MODE CORRECTED DATA*******#
hn_CM2, cn_CM2 = np.histogram(noiseMapCM_2nd.flatten(), bins=200, range=(0, 40))
dn2 = [{'x': cn[:-1],
        'y': hn,
        'drawstyle': 'steps-mid',
        'color': 'blue',
        'label': 'Uncorrected Noise'
        },
       {'x': cn_CM[:-1],
        'y': hn_CM,
        'drawstyle': 'steps-mid',
        'color': 'red',
        'label': 'Common Mode Corrected Noise prior to Bad Pixels Exclusion'
        },
       {'x': cn_CM2[:-1],
        'y': hn_CM2,
        'drawstyle': 'steps-mid',
        'color': 'black',
        'label': 'Common Mode Corrected Noise after Bad Pixels Exclusion'
        }]
fig = xana.simplePlot(dn2, figsize='2col', aspect=1, x_label='Noise (ADU)', y_label="Counts", y_log=True,
                      x_range=(0, 40), y_range=(0, 1e6), legend='top-right-frame-1col', title='Final Noise Comparison')
#fig.savefig('Noise_Hist_2.svg', format='svg', dpi=1200, bbox_inches='tight')
fig = xana.heatmapPlot(np.log2(noiseMapCM_2nd[:, :, 0]), aspect=1, x_label='Column Number', y_label='Row Number',
                       lut_label='Noise (ADU)', x_range=(0, y), y_range=(0, x), vmax=2*np.mean(noiseMapCM_2nd),
                       title='Final Common Mode Corrected Noise\n (Bad Pixels Excluded)',
                       panel_x_label='Columns Stat (ADU)', panel_y_label='Rows Stat (ADU)')
#fig.savefig('NoiseMapCM_2nd.pdf', format='pdf', dpi=400, bbox_inches='tight')
```
%% Cell type:markdown id: tags:
### Final Bad Pixel Map
Lastly, the final bad pixel map is generated from the OffsetMap and noiseMapCM_2nd (the common mode corrected noise after exclusion of the initial bad pixels):
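In other words, a pixel $p$ is flagged when

$$\left| \mathrm{map}(p) - \mathrm{median}(\mathrm{map}) \right| > \sigma_{\mathrm{cut}} \cdot \mathrm{std}(\mathrm{map}),$$

where $\sigma_{\mathrm{cut}}$ is bad_pixel_offset_sigma for the offsetMap and bad_pixel_noise_sigma for noiseMapCM_2nd.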
%% Cell type:code id: tags:
``` python
bad_pixels = np.zeros(offsetMap.shape, np.uint32)

# Flag pixels whose offset deviates from the map median by more than
# bad_pixel_offset_sigma standard deviations:
mnoffset = np.nanmedian(offsetMap)
stdoffset = np.nanstd(offsetMap)
bad_pixels[(offsetMap < mnoffset-bad_pixel_offset_sigma*stdoffset) |
           (offsetMap > mnoffset+bad_pixel_offset_sigma*stdoffset)] = BadPixels.OFFSET_OUT_OF_THRESHOLD.value

# Flag pixels whose CM corrected noise deviates from the map median by more
# than bad_pixel_noise_sigma standard deviations:
mnnoise = np.nanmedian(noiseMapCM_2nd)
stdnoise = np.nanstd(noiseMapCM_2nd)
bad_pixels[(noiseMapCM_2nd < mnnoise-bad_pixel_noise_sigma*stdnoise) |
           (noiseMapCM_2nd > mnnoise+bad_pixel_noise_sigma*stdnoise)] = BadPixels.NOISE_OUT_OF_THRESHOLD.value

# Merge with the bad pixel mask computed earlier in the notebook:
bad_pixels = np.bitwise_or(bad_pixels, mask)

fig = xana.heatmapPlot(np.log2(bad_pixels[:,:,0]), aspect=1, x_label='Column Number', y_label='Row Number',
                       lut_label='2^(Assigned Value to Bad Pixels)', x_range=(0, y), y_range=(0, x),
                       panel_top_low_lim=0, panel_top_high_lim=20, panel_side_low_lim=0, panel_side_high_lim=20,
                       title='Final Bad Pixels Map', panel_x_label='Columns Stat',
                       panel_y_label='Rows Stat', vmax=20)
#fig.savefig('BadPixelMap_2.svg', format='svg', dpi=1200, bbox_inches='tight')
```
%% Cell type:code id: tags:
``` python
display(Markdown('### Statistics on the Bad Pixels'))
num_bad_pixels = np.count_nonzero(bad_pixels)
num_all_pixels = x*y
percentage_bad_pixels = num_bad_pixels*100/num_all_pixels
print("Number of bad pixels: {:0.0f}, i.e. {:0.2f}% of all pixels".format(num_bad_pixels, percentage_bad_pixels))
```
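%% Cell type:markdown id: tags:
Because bad_pixels is a bitfield, the total above can also be broken down per flag. A minimal sketch, assuming BadPixels is a standard Python Enum (so each flag exposes .name and .value):
%% Cell type:code id: tags:
``` python
# Sketch: per-flag breakdown of the bad pixel bitfield. np.bitwise_and
# isolates pixels carrying a given flag; a pixel may carry several flags
# at once, so the per-flag counts can sum to more than the total above.
for flag in (BadPixels.OFFSET_OUT_OF_THRESHOLD, BadPixels.NOISE_OUT_OF_THRESHOLD):
    n = np.count_nonzero(np.bitwise_and(bad_pixels, flag.value))
    print("{}: {} pixels".format(flag.name, n))
```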
%% Cell type:markdown id: tags:
### Electronic Noise
According to Table 6.1 (page 80) of Ivana Klačková's master's thesis: "Conversion gain for the FastCCD high gain is: lower hemisphere = 6.2e-/ADU and upper hemisphere = 6.1e-/ADU." Also, we know that the high gain/medium gain and high gain/low gain ratios are 4 and 8, respectively, since high gain = x8, medium gain = x2 and low gain = x1. We do not currently (October 2019) know the conversion gains for the FastCCD medium and low gains in electrons. Therefore, we use those of the high gains (in both hemispheres) together with the gain ratios to convert the noise from ADU to electrons.
The following tables present the noise in the lower hemisphere, the upper hemisphere, and the entire FastCCD detector at different stages. The values in the first table (in ADU and e-) are the mean noise per pixel, where the noise is, in turn, the initial uncorrected noise, the CM corrected noise after the second trial (including bad pixels), and the CM corrected noise after the third trial (excluding bad pixels).
The values in the second table (in electrons) are the standard deviation of the noise per pixel.
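To make the arithmetic explicit: since a lower amplification yields more electrons per ADU, the medium and low gain conversion factors follow from the high gain values and the gain ratios. A minimal sketch (the actual ADU_to_electron_* variables are defined earlier in this notebook; the numbers here only restate the thesis values quoted above):
%% Cell type:code id: tags:
``` python
# High gain conversion factors from the thesis:
ADU_to_electron_upper_hg = 6.1  # e-/ADU, upper hemisphere
ADU_to_electron_lower_hg = 6.2  # e-/ADU, lower hemisphere

# With high/medium = 4 and high/low = 8, each ADU corresponds to 4x (8x)
# more electrons in medium (low) gain than in high gain:
ADU_to_electron_upper_mg = 4 * ADU_to_electron_upper_hg  # 24.4 e-/ADU
ADU_to_electron_lower_mg = 4 * ADU_to_electron_lower_hg  # 24.8 e-/ADU
ADU_to_electron_upper_lg = 8 * ADU_to_electron_upper_hg  # 48.8 e-/ADU
ADU_to_electron_lower_lg = 8 * ADU_to_electron_lower_hg  # 49.6 e-/ADU
```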
%% Cell type:code id: tags:
``` python
# noiseMap refers to the initial uncorrected noise, noiseMapCM refers to common mode corrected noise with inclusion of
# bad pixels, and noiseMapCM_2nd refers to common mode corrected noise without inclusion of bad pixels:

# Average of ADU_to_electron over the entire CCD for each gain setting:
ADU_to_electron_hg = (ADU_to_electron_upper_hg + ADU_to_electron_lower_hg)/2  # high gain
ADU_to_electron_mg = (ADU_to_electron_upper_mg + ADU_to_electron_lower_mg)/2  # medium gain
ADU_to_electron_lg = (ADU_to_electron_upper_lg + ADU_to_electron_lower_lg)/2  # low gain
```
%% Cell type:code id: tags:
``` python
# Pick the conversion factors matching the gain setting of this run
# (the original loop over gain_dict was redundant, as the selection
# depends only on det_gain):
if det_gain == gain_dict["low gain"]:
    ADU_to_electron = ADU_to_electron_lg
    ADU_to_electron_upper = ADU_to_electron_upper_lg
    ADU_to_electron_lower = ADU_to_electron_lower_lg
elif det_gain == gain_dict["medium gain"]:
    ADU_to_electron = ADU_to_electron_mg
    ADU_to_electron_upper = ADU_to_electron_upper_mg
    ADU_to_electron_lower = ADU_to_electron_lower_mg
else:  # Here, we assume the auto gain and high gain conversions from ADU to electrons are the same.
    ADU_to_electron = ADU_to_electron_hg
    ADU_to_electron_upper = ADU_to_electron_upper_hg
    ADU_to_electron_lower = ADU_to_electron_lower_hg

print("Abbreviations:")
print(" - ED = Entire Detector;\n - LH: Lower Hemisphere;\n - UH: Upper Hemisphere;")
print(" - CM Noise: Common Mode Corrected Noise;")
print(" - BP: Bad Pixels\n")

t0 = PrettyTable()
t0.title = "Averages of Noise per Pixel"
t0.field_names = ["Uncorrected Noise", "CM Noise, BP Incl.", "CM Noise, BP Excl."]
t0.add_row(["ED: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMap), np.mean(noiseMap)*ADU_to_electron),
            "ED: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMapCM), np.mean(noiseMapCM)*ADU_to_electron),
            "ED: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMapCM_2nd), np.mean(noiseMapCM_2nd)*ADU_to_electron)])
t0.add_row(["LH: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMap[:x//2,:]),
                                                  np.mean(noiseMap[:x//2,:])*ADU_to_electron_lower),
            "LH: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMapCM[:x//2,:]),
                                                  np.mean(noiseMapCM[:x//2,:])*ADU_to_electron_lower),
            "LH: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMapCM_2nd[:x//2,:]),
                                                  np.mean(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])
t0.add_row(["UH: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMap[x//2:,:]),
                                                  np.mean(noiseMap[x//2:,:])*ADU_to_electron_upper),
            "UH: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMapCM[x//2:,:]),
                                                  np.mean(noiseMapCM[x//2:,:])*ADU_to_electron_upper),
            "UH: {:0.2f} ADU = {:0.2f} e-".format(np.mean(noiseMapCM_2nd[x//2:,:]),
                                                  np.mean(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])
print(t0, '\n')

t1 = PrettyTable()
t1.title = "Standard Deviations of Noise per Pixel"
t1.field_names = ["Uncorrected Noise", "CM Noise, BP Incl.", "CM Noise, BP Excl."]
t1.add_row(["ED: {:0.2f} e-".format(np.std(noiseMap)*ADU_to_electron),
            "ED: {:0.2f} e-".format(np.std(noiseMapCM)*ADU_to_electron),
            "ED: {:0.2f} e-".format(np.std(noiseMapCM_2nd)*ADU_to_electron)])
t1.add_row(["LH: {:0.2f} e-".format(np.std(noiseMap[:x//2,:])*ADU_to_electron_lower),
            "LH: {:0.2f} e-".format(np.std(noiseMapCM[:x//2,:])*ADU_to_electron_lower),
            "LH: {:0.2f} e-".format(np.std(noiseMapCM_2nd[:x//2,:])*ADU_to_electron_lower)])
t1.add_row(["UH: {:0.2f} e-".format(np.std(noiseMap[x//2:,:])*ADU_to_electron_upper),
            "UH: {:0.2f} e-".format(np.std(noiseMapCM[x//2:,:])*ADU_to_electron_upper),
            "UH: {:0.2f} e-".format(np.std(noiseMapCM_2nd[x//2:,:])*ADU_to_electron_upper)])
print(t1)
```
%% Cell type:markdown id: tags:
### Calibration Constants
%% Cell type:code id: tags:
``` python
dictionary = {}
dictionary['Offset'] = offsetMap.data
dictionary['Noise'] = noiseMapCM_2nd.data
dictionary['BadPixelsDark'] = bad_pixels.data

md = None
for const in dictionary:
    dconst = getattr(Constants.CCD(DetectorTypes.fastCCD), const)()
    dconst.data = dictionary[const]

    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,
                                    integration_time=integration_time,
                                    gain_setting=det_gain,
                                    temperature=fix_temperature,
                                    pixels_x=1934,
                                    pixels_y=960)

    for parm in condition.parameters:
        if parm.name == "Sensor Temperature":
            parm.lower_deviation = temp_limits
            parm.upper_deviation = temp_limits

    # This should be used in case the notebook is run by a method other
    # than myMDC, which already sends the CalCat info.
    # TODO: Set db_module to "" by default in the first cell.
    if not db_module:
        db_module = get_pdu_from_db(karabo_id, karabo_da, dconst,
                                    condition, cal_db_interface,
                                    snapshot_at=creation_time)[0]

    if db_output:
        md = send_to_db(db_module, karabo_id, dconst, condition, file_loc, report,
                        cal_db_interface, creation_time=creation_time, timeout=cal_db_timeout)

    if local_output:
        md = save_const_to_h5(db_module, karabo_id, dconst, condition, dconst.data,
                              file_loc, report, creation_time, out_folder)
        print(f"Calibration constant {const} is stored locally.")

print("Constants parameter conditions are:\n")
print(f"• bias_voltage: {bias_voltage}\n• integration_time: {integration_time}\n"
      f"• temperature: {fix_temperature}\n• gain_setting: {det_gain}\n"
      f"• creation_time: {md.calibration_constant_version.begin_at if md is not None else creation_time}\n")
```
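%% Cell type:markdown id: tags:
When local_output is enabled, the locally stored constants can be inspected afterwards with plain h5py. A minimal sketch, assuming only that save_const_to_h5 wrote .h5 files into out_folder (the exact file naming convention is not shown here, so check the folder for the actual names):
%% Cell type:code id: tags:
``` python
import glob
import h5py

# Sketch: list the datasets stored in each locally written constant file.
for fname in glob.glob(f"{out_folder}/*.h5"):
    print(fname)
    with h5py.File(fname, "r") as f:
        f.visititems(lambda name, obj: print(" ", name))
```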