{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# FastCCD Data Correction\n", "\n", "Authors: I. Klačková, S. Hauf, Version 1.0\n", "\n", "The following notebook provides correction of images acquired with the FastCCD." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T15:54:23.218849Z", "start_time": "2018-12-06T15:54:23.166497Z" } }, "outputs": [], "source": [ "cluster_profile = \"noDB\" #ipcluster profile to use\n", "in_folder = \"/gpfs/exfel/exp/SCS/201930/p900074/raw\" # input folder, required\n", "out_folder = '/gpfs/exfel/data/scratch/karnem/test/fastccd' # output folder, required\n", "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n", "run = 277 # run number\n", "\n", "karabo_da = 'DA05' # data aggregators\n", "karabo_id = \"SCS_CDIDET_FCCD2M\" # karabo prefix of PNCCD devices\n", "receiver_id = \"FCCD\" # inset for receiver devices\n", "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # path template in hdf5 file\n", "h5path = '/INSTRUMENT/{}/DAQ/{}:daqOutput/data/image' # path in HDF5 file\n", "h5path_t = '/CONTROL/{}/CTRL/LSLAN/inputA/crdg/value' # temperature path in HDF5 file\n", "h5path_cntrl = '/RUN/{}/DET/FCCD' # path to control data\n", "\n", "use_dir_creation_date = True # use dir creation data for calDB queries\n", "cal_db_interface = \"tcp://max-exfl-cal001:8015#8025\" # calibration DB interface to use\n", "cal_db_timeout = 300000000 # timeout on caldb requests\n", "\n", "\n", "cpuCores = 16 #Specifies the number of running cpu cores\n", "operation_mode = \"FF\" # FS stands for frame-store and FF for full-frame opeartion\n", "split_evt_primary_threshold = 7. # primary threshold for split event classification in terms of n sigma noise\n", "split_evt_secondary_threshold = 4. # secondary threshold for split event classification in terms of n sigma noise\n", "split_evt_mip_threshold = 1000. # MIP threshold for event classification\n", "chunk_size_idim = 1 # H5 chunking size of output data\n", "overwrite = True # overwrite existing files\n", "sequences_per_node = 1 # sequences to correct per node\n", "limit_images = 0 # limit images per file \n", "time_offset_days = 0 # offset in days for calibration parameters\n", "photon_energy_gain_map = 5.9 # energy in keV\n", "fix_temperature = 0. 
"fix_temperature = 0. # fix temperature to this value, set to 0 to use slow control value\n", "flipped_between = [\"2019-02-01\", \"2019-04-02\"] # detector was flipped during this timespan\n", "temp_limits = 5 # limits within which temperature is considered the same\n", "commonModeAxis = 1 # Axis along which common mode will be calculated (0: along rows, 1: along columns)\n", "\n", "# Correction Booleans\n", "only_offset = False # Only apply offset correction\n", "cti_corr = False # Apply CTI correction\n", "relgain_corr = True # Apply relative gain correction\n", "common_mode_corr = False # Apply commonMode correction\n", "correct_offset_drift = False # correct for offset drifts\n", "do_pattern_classification = True # classify split events\n", "\n", "def balance_sequences(in_folder, run, sequences, sequences_per_node, karabo_da):\n", "    from xfel_calibrate.calibrate import balance_sequences as bs\n", "    return bs(in_folder, run, sequences, sequences_per_node, karabo_da)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Fill dictionaries comprising bools and arguments for correction and data analysis\n", "\n", "# Here the hierarchy and dependencies of the correction booleans are defined\n", "corr_bools = {}\n", "\n", "# offset is at the bottom of the FastCCD correction pyramid.\n", "corr_bools[\"only_offset\"] = only_offset\n", "\n", "# Don't apply any other corrections if only_offset is requested\n", "if not only_offset:\n", "\n", "    # Apply relative gain correction, only if requested\n", "    if relgain_corr:\n", "        corr_bools[\"relgain\"] = relgain_corr\n", "\n", "    # Apply CTI correction, only if requested\n", "    if cti_corr:\n", "        corr_bools[\"cti\"] = cti_corr\n", "\n", "    corr_bools[\"common_mode\"] = common_mode_corr\n", "    corr_bools[\"offset_drift\"] = correct_offset_drift\n", "    corr_bools[\"pattern_class\"] = do_pattern_classification\n", "\n", "# Here the hierarchy and dependencies of the data analysis booleans and arguments are defined\n", "data_analysis_parms = {}" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T15:54:23.455376Z", "start_time": "2018-12-06T15:54:23.413579Z" } }, "outputs": [], "source": [ "import XFELDetAna.xfelprofiler as xprof\n", "\n", "profiler = xprof.Profiler()\n", "profiler.disable()\n", "from XFELDetAna.util import env\n", "\n", "env.iprofile = cluster_profile\n", "\n", "import warnings\n", "\n", "warnings.filterwarnings('ignore')\n", "\n", "from XFELDetAna import xfelpyanatools as xana\n", "from XFELDetAna import xfelpycaltools as xcal\n", "from XFELDetAna.plotting.util import prettyPlotting\n", "\n", "prettyPlotting = True\n", "import copy\n", "import os\n", "import time\n", "from datetime import timedelta\n", "\n", "import dateutil.parser\n", "import h5py\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from cal_tools.tools import (\n", "    get_constant_from_db_and_time,\n", "    get_dir_creation_date,\n", "    get_random_db_interface,\n", ")\n", "from iCalibrationDB import Conditions, ConstantMetaData, Constants, Detectors, Versions\n", "from iCalibrationDB.detectors import DetectorTypes\n", "from iminuit import Minuit\n", "from prettytable import PrettyTable\n", "from XFELDetAna.detectors.fastccd import readerh5 as fastccdreaderh5\n", "from XFELDetAna.xfelreaders import ChunkReader\n", "\n", "%matplotlib inline\n", "\n", "if sequences[0] == -1:\n", "    sequences = None\n", "\n", "# select a random port for the database\n", "cal_db_interface = get_random_db_interface(cal_db_interface)" ] },
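{ "cell_type": "markdown", "metadata": {}, "source": [ "The `#` in the default `cal_db_interface` above appears to denote a port range, from which one concrete port is picked at random (see the comment in the cell above). A minimal, purely illustrative sketch of what such an expansion could look like is given below; `get_random_db_interface` from `cal_tools` is assumed to do the equivalent, and `pick_interface` is a hypothetical name, not part of the pipeline." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import random\n", "\n", "def pick_interface(interface):\n", "    # Hypothetical helper, only to illustrate the 'tcp://host:8015#8025' convention;\n", "    # the notebook itself uses cal_tools.tools.get_random_db_interface.\n", "    if '#' not in interface:\n", "        return interface\n", "    prefix, _, ports = interface.rpartition(':')\n", "    start, end = (int(p) for p in ports.split('#'))\n", "    return '{}:{}'.format(prefix, random.randint(start, end))\n", "\n", "print(pick_interface('tcp://max-exfl-cal001:8015#8025'))" ] },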
{ "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T15:54:23.679069Z", "start_time": "2018-12-06T15:54:23.662821Z" } }, "outputs": [], "source": [ "if operation_mode == \"FS\":\n", "    x = 960 # rows of the FastCCD to analyze in FS mode\n", "    y = 960 # columns of the FastCCD to analyze in FS mode\n", "    print('\\nYou are analyzing data in FS mode.')\n", "else:\n", "    x = 1934 # rows of the FastCCD to analyze in FF mode\n", "    y = 960 # columns of the FastCCD to analyze in FF mode\n", "    print('\\nYou are analyzing data in FF mode.')\n", "\n", "ped_dir = \"{}/r{:04d}\".format(in_folder, run)\n", "fp_name = path_template.format(run, karabo_da)\n", "fp_path = '{}/{}'.format(ped_dir, fp_name)\n", "\n", "h5path = h5path.format(karabo_id, receiver_id)\n", "h5path_t = h5path_t.format(karabo_id)\n", "h5path_cntrl = h5path_cntrl.format(karabo_id)\n", "\n", "print(\"Reading data from: {}\\n\".format(fp_path))\n", "print(\"Run is: {}\".format(run))\n", "print(\"HDF5 path: {}\".format(h5path))\n", "print(\"Data is output to: {}\".format(out_folder))\n", "\n", "import datetime\n", "\n", "creation_time = None\n", "if use_dir_creation_date:\n", "    creation_time = get_dir_creation_date(in_folder, run) + timedelta(days=time_offset_days)\n", "if creation_time:\n", "    print(\"Using {} as creation time\".format(creation_time.isoformat()))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T15:54:23.913269Z", "start_time": "2018-12-06T15:54:23.868910Z" } }, "outputs": [], "source": [ "sensorSize = [x, y]\n", "chunkSize = 100 # Number of images to read per chunk\n", "blockSize = [sensorSize[0]//2, sensorSize[1]] # Sensor area will be analysed according to blockSize\n", "xcal.defaultBlockSize = blockSize\n", "memoryCells = 1 # FastCCD has 1 memory cell\n", "\n", "commonModeBlockSize = blockSize\n", "# commonModeAxisR = 'row'  # Axis along which common mode will be calculated\n", "run_parallel = True\n", "profile = False\n", "\n", "filename = fp_path.format(sequences[0] if sequences else 0)\n", "with h5py.File(filename, 'r') as f:\n", "    bias_voltage = int(f['{}/biasclock/bias/value'.format(h5path_cntrl)][0])\n", "    det_gain = int(f['{}/exposure/gain/value'.format(h5path_cntrl)][0])\n", "    integration_time = int(f['{}/exposure/exposure_time/value'.format(h5path_cntrl)][0])\n", "    print(\"Bias voltage is {} V\".format(bias_voltage))\n", "    print(\"Detector gain is set to x{}\".format(det_gain))\n", "    print(\"Detector integration time is set to {}\".format(integration_time))\n", "    temperature = np.mean(f[h5path_t])\n", "    temperature_k = temperature + 273.15\n", "    if fix_temperature != 0.:\n", "        temperature_k = fix_temperature\n", "        print(\"Using fixed temperature\")\n", "    print(\"Mean temperature was {:0.2f} °C / {:0.2f} K at beginning of run\".format(temperature, temperature_k))\n", "\n", "if not os.path.exists(out_folder):\n", "    os.makedirs(out_folder)\n", "elif not overwrite:\n", "    # Stop the notebook, not only this cell\n", "    raise SystemExit(\"Output path exists! Exiting\")" ] },
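{ "cell_type": "markdown", "metadata": {}, "source": [ "Note that `path_template` is expanded in two stages: the doubled braces `{{:05d}}` survive the first `.format()` call (run number and aggregator, as in `fp_name` above) and are only filled with the sequence number in a second call, as done in the next cell. A short demonstration with the default values from above:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Two-stage expansion of the file name template (values from the defaults above).\n", "tmpl = 'RAW-R{:04d}-{}-S{{:05d}}.h5'\n", "stage1 = tmpl.format(277, 'DA05')\n", "print(stage1)            # RAW-R0277-DA05-S{:05d}.h5\n", "print(stage1.format(3))  # RAW-R0277-DA05-S00003.h5" ] },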
{ "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T15:54:24.088948Z", "start_time": "2018-12-06T15:54:24.059925Z" } }, "outputs": [], "source": [ "dirlist = sorted(os.listdir(ped_dir))\n", "file_list = []\n", "total_sequences = 0\n", "fsequences = []\n", "for entry in dirlist:\n", "\n", "    # only consider h5 files\n", "    abs_entry = \"{}/{}\".format(ped_dir, entry)\n", "    if os.path.isfile(abs_entry) and os.path.splitext(abs_entry)[1] == \".h5\":\n", "\n", "        if sequences is None:\n", "            for seq in range(len(dirlist)):\n", "\n", "                if path_template.format(run, karabo_da).format(seq) in abs_entry:\n", "                    file_list.append(abs_entry)\n", "                    total_sequences += 1\n", "                    fsequences.append(seq)\n", "        else:\n", "            for seq in sequences:\n", "\n", "                if path_template.format(run, karabo_da).format(seq) in abs_entry:\n", "                    file_list.append(os.path.abspath(abs_entry))\n", "                    total_sequences += 1\n", "                    fsequences.append(seq)\n", "sequences = fsequences" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T18:43:39.776018Z", "start_time": "2018-12-06T18:43:39.759185Z" } }, "outputs": [], "source": [ "import tabulate\n", "from IPython.display import HTML, Latex, Markdown, display\n", "\n", "print(\"Processing a total of {} sequence files\".format(total_sequences))\n", "table = []\n", "\n", "for k, f in enumerate(file_list):\n", "    table.append((k, f))\n", "if len(table):\n", "    md = display(Latex(tabulate.tabulate(table, tablefmt='latex', headers=[\"#\", \"file\"])))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "As a first step, dark maps have to be loaded." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T15:54:28.254544Z", "start_time": "2018-12-06T15:54:24.709521Z" } }, "outputs": [], "source": [ "offsetMap = None\n", "badPixelMap = None\n", "noiseMap = None\n", "\n", "# The following constants are saved in the database per gain setting.\n", "# Hence, a loop over all 3 gains is performed.\n", "\n", "# Gain-setting names used in report messages.\n", "g_name = {8: \"High gain\", 2: \"Medium gain\", 1: \"Low gain\"}\n", "\n", "for i, g in enumerate([8, 2, 1]):\n", "\n", "    print(\"Retrieving constants for {}\".format(g_name[g]))\n", "\n", "    # set the operating condition\n", "    condition = Conditions.Dark.CCD(bias_voltage=bias_voltage,\n", "                                    integration_time=integration_time,\n", "                                    gain_setting=g,\n", "                                    temperature=temperature_k,\n", "                                    pixels_x=x,\n", "                                    pixels_y=y)\n", "\n", "    for parm in condition.parameters:\n", "        if parm.name == \"Sensor Temperature\":\n", "            parm.lower_deviation = temp_limits\n", "            parm.upper_deviation = temp_limits\n", "\n", "    offset = Constants.CCD(DetectorTypes.fastCCD).Offset()\n", "    noise = Constants.CCD(DetectorTypes.fastCCD).Noise()\n", "    bpix = Constants.CCD(DetectorTypes.fastCCD).BadPixelsDark()\n", "\n", "    ## retrieve offset\n", "    offset, offset_time = get_constant_from_db_and_time(karabo_id, karabo_da,\n", "                                                        constant=offset,\n", "                                                        condition=condition,\n", "                                                        empty_constant=None,\n", "                                                        cal_db_interface=cal_db_interface,\n", "                                                        creation_time=creation_time,\n", "                                                        timeout=cal_db_timeout, print_once=False)\n", "\n", "    if offsetMap is None:\n", "        offsetMap = np.zeros(list(offset.shape)+[3], np.float32)\n", "\n", "    if offset is not None:\n", "        offsetMap[...,i] = offset\n", "    else:\n", "        print(\"NO OFFSET FOUND IN DB!\")\n", "\n", "    offset_temperature = None\n",
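"    # Read back the temperature condition the offset constant was stored with.\n",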
" for parm in condition.parameters:\n", "\n", " if parm.name == \"Sensor Temperature\":\n", " offset_temperature = parm.value\n", "\n", " print(\"Dark Offset was taken at temperature of {:0.2f}K at {}\"\n", " .format(offset_temperature, offset_time))\n", "\n", " ## retrieve_noise\n", " noise, noise_time = get_constant_from_db_and_time(karabo_id, karabo_da,\n", " constant=noise,\n", " condition=condition,\n", " empty_constant=None,\n", " cal_db_interface=cal_db_interface, \n", " creation_time=creation_time,\n", " timeout=cal_db_timeout, print_once=False)\n", " if noiseMap is None:\n", " noiseMap = np.zeros(list(noise.shape)+[3], np.float32)\n", " if noise is not None:\n", " noiseMap[...,i] = noise\n", " else:\n", " print(\"NO NOISE FOUND IN DB!\")\n", "\n", " print(\"Noise at {} was taken at {}\"\n", " .format(g_name[g], noise_time))\n", "\n", " ## retrieve_bad pixels\n", " bpix, bpix_time = get_constant_from_db_and_time(karabo_id, karabo_da,\n", " constant=bpix,\n", " condition=condition,\n", " empty_constant=None,\n", " cal_db_interface=cal_db_interface, \n", " creation_time=creation_time,\n", " timeout=cal_db_timeout, print_once=False)\n", " if badPixelMap is None:\n", " badPixelMap = np.zeros(list(bpix.shape)+[3], np.uint32)\n", " print(\"NO BadPixel FOUND IN DB!\")\n", " if bpix is not None:\n", " badPixelMap[...,i] = noise\n", " else:\n", " print(\"NO BADPIXEL FOUND IN DB!\")\n", "\n", " print(\"BadPixes at {} was taken at {}\"\n", " .format(g_name[g], bpix_time))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Loading cti and relative gain values" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T15:54:28.343869Z", "start_time": "2018-12-06T15:54:28.271344Z" } }, "outputs": [], "source": [ "# relative gain\n", "if corr_bools.get('relgain'):\n", "\n", " relgain = Constants.CCD(DetectorTypes.fastCCD).RelativeGain()\n", "\n", " # set the operating condition\n", " condition = Conditions.Illuminated.CCD(bias_voltage=bias_voltage,\n", " integration_time=integration_time,\n", " gain_setting=det_gain,\n", " temperature=temperature_k,\n", " pixels_x=x, pixels_y=y,\n", " photon_energy=photon_energy_gain_map)\n", "\n", " relgain, relgain_time = get_constant_from_db_and_time(karabo_id, karabo_da,\n", " constant=relgain,\n", " condition=condition,\n", " empty_constant=None,\n", " cal_db_interface=cal_db_interface,\n", " creation_time=creation_time,\n", " timeout=cal_db_timeout, print_once=False)\n", "\n", " # TODO: CHECK IF THIS FLIPPING IS CORRECT\n", " if relgain is None:\n", " corr_bools[\"relgain\"] = False\n", " print('Relative Gain was not found. 
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if corr_bools.get('cti'):\n", "    pass\n", "    ## FASTCCD CTI CURRENTLY IS NOT SUPPORTED BY DET!\n", "\n", "    # The code below is kept as a record of the previous algorithm,\n", "    # for reference during the development of the new CTI algorithm.\n", "    # TODO: proper CTI retrieval and the corresponding correction\n", "    # in the following cells\n", "\n", "# relGainCA = copy.copy(relGain)\n", "# relGainC = relGainCA[:relGainCA.shape[0]//2,...]\n", "# ctiA = np.ones(relGainCA.shape[:2])\n", "# cti = np.ones(relGainC.shape[:2])\n", "# i = 0\n", "# idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n", "# mn1 = np.nanmean(relGainC[i, ~idx, 0])\n", "\n", "# for i in range(1, relGainC.shape[0]):\n", "#     idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n", "#     mn2 = np.nanmean(relGainC[i, ~idx, 0])\n", "#     cti[i,:] = mn2/mn1\n", "# ctiA[:relGainCA.shape[0]//2,...] = cti\n", "\n", "# relGainC = relGainCA[relGainCA.shape[0]//2:,...]\n", "\n", "# cti = np.ones(relGainC.shape[:2])\n", "# i = -1\n", "# idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n", "# mn1 = np.nanmean(relGainC[i, ~idx, 0])\n", "\n", "# for i in range(relGainC.shape[0]-1, 1, -1):\n", "#     idx = (relGainC[i, :, 0] < 0.9) | (relGainC[i,:,0] > 1.1)\n", "#     mn2 = np.nanmean(relGainC[i, ~idx, 0])\n", "#     cti[i,:] = mn2/mn1\n", "\n", "# ctiA[relGainCA.shape[0]//2:,...] = cti\n", "\n", "# relGainCA = copy.copy(relGain)\n", "# relGainC = relGainCA[:relGainCA.shape[0]//2,...]\n", "# for i in range(relGainC.shape[1]):\n", "#     idx = (relGainC[:,i, 0] < 0.95) | (relGainC[:,i,0] > 1.05)\n", "#     relGainC[idx,i,0] = np.nanmean(relGainC[~idx,i,0])\n", "#     relGainC[idx,i,1] = np.nanmean(relGainC[~idx,i,1])\n", "#     relGainC[idx,i,2] = np.nanmean(relGainC[~idx,i,2])\n", "# relGainCA[:relGainCA.shape[0]//2,...] = relGainC\n", "# relGainC = relGainCA[relGainCA.shape[0]//2:,...]\n", "# for i in range(relGainC.shape[1]):\n", "#     idx = (relGainC[:,i, 0] < 0.95) | (relGainC[:,i,0] > 1.05)\n", "#     relGainC[idx,i,0] = np.nanmean(relGainC[~idx,i,0])\n", "#     relGainC[idx,i,1] = np.nanmean(relGainC[~idx,i,1])\n", "#     relGainC[idx,i,2] = np.nanmean(relGainC[~idx,i,2])\n", "# relGainCA[relGainCA.shape[0]//2:,...] = relGainC\n",
"# relGainC = relGainCA*ctiA[...,None]\n", "\n", "# relGain = relGainC" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if corr_bools.get('relgain'):\n", "    flipped_between = [dateutil.parser.parse(d) for d in flipped_between]\n", "    flip_rgain = (creation_time.replace(tzinfo=None) >= flipped_between[0]\n", "                  and creation_time.replace(tzinfo=None) <= flipped_between[1])\n", "    flip_rgain &= (relgain_time.replace(tzinfo=None) >= flipped_between[0]\n", "                   and relgain_time.replace(tzinfo=None) <= flipped_between[1])\n", "    print(\"Accounting for flipped detector: {}\".format(flip_rgain))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T15:54:28.771629Z", "start_time": "2018-12-06T15:54:28.346051Z" } }, "outputs": [], "source": [ "#************************Calculators************************#\n", "\n", "if corr_bools.get('common_mode'):\n", "    cmCorrection = xcal.CommonModeCorrection([x, y],\n", "                                             commonModeBlockSize,\n", "                                             commonModeAxis,\n", "                                             nCells=memoryCells,\n", "                                             stride=10,\n", "                                             runParallel=True,\n", "                                             stats=True, minFrac=0)\n", "\n", "patternClassifierLH = xcal.PatternClassifier([x//2, y],\n", "                                             noiseMap[:x//2, :],\n", "                                             split_evt_primary_threshold,\n", "                                             split_evt_secondary_threshold,\n", "                                             split_evt_mip_threshold,\n", "                                             tagFirstSingles=0,\n", "                                             nCells=memoryCells,\n", "                                             cores=cpuCores,\n", "                                             allowElongated=False,\n", "                                             blockSize=[x//2, y],\n", "                                             runParallel=True)\n", "\n", "patternClassifierUH = xcal.PatternClassifier([x//2, y],\n", "                                             noiseMap[x//2:, :],\n", "                                             split_evt_primary_threshold,\n", "                                             split_evt_secondary_threshold,\n", "                                             split_evt_mip_threshold,\n", "                                             tagFirstSingles=0,\n", "                                             nCells=memoryCells,\n", "                                             cores=cpuCores,\n", "                                             allowElongated=False,\n", "                                             blockSize=[x//2, y],\n", "                                             runParallel=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:08:51.886343Z", "start_time": "2018-12-06T16:08:51.842837Z" } }, "outputs": [], "source": [ "#*****************Histogram Calculators******************#\n", "\n", "histCalOffsetCor = xcal.HistogramCalculator([x, y],\n", "                                            bins=1050,\n", "                                            range=[-50, 1000],\n", "                                            nCells=memoryCells,\n", "                                            cores=cpuCores,\n", "                                            blockSize=blockSize)\n", "\n", "histCalPcorr = xcal.HistogramCalculator([x, y],\n", "                                        bins=1050,\n", "                                        range=[-50, 1000],\n", "                                        nCells=memoryCells,\n", "                                        cores=cpuCores,\n", "                                        blockSize=blockSize)\n", "\n", "histCalPcorrS = xcal.HistogramCalculator([x, y],\n", "                                         bins=1050,\n", "                                         range=[-50, 1000],\n", "                                         nCells=memoryCells,\n", "                                         cores=cpuCores,\n", "                                         blockSize=blockSize)\n", "\n", "histCalCommonModeCor = xcal.HistogramCalculator([x, y],\n", "                                                bins=1050,\n", "                                                range=[-50, 1000],\n", "                                                nCells=memoryCells,\n", "                                                cores=cpuCores,\n", "                                                blockSize=blockSize)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Applying corrections" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:08:52.441784Z", "start_time": "2018-12-06T16:08:52.437284Z" } }, "outputs": [], "source": [ "patternClassifierLH._imagesPerChunk = 500\n", "patternClassifierUH._imagesPerChunk = 500\n", "patternClassifierLH.debug()\n", "patternClassifierUH.debug()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:08:53.042555Z", "start_time": "2018-12-06T16:08:53.034522Z" } }, "outputs": [], "source": [
"histCalOffsetCor.debug()\n", "histCalCommonModeCor.debug()\n", "histCalPcorr.debug()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:08:53.551111Z", "start_time": "2018-12-06T16:08:53.531064Z" } }, "outputs": [], "source": [ "def copy_and_sanitize_non_cal_data(infile, outfile, h5base):\n", " \"\"\"\n", " \n", " \n", " :param infile: Input file \n", " :param outfile: Otput file\n", " :param h5base: \n", " \"\"\"\n", " \n", " if h5base.startswith(\"/\"):\n", " h5base = h5base[1:]\n", " dont_copy = ['pixels']\n", " dont_copy = [h5base+\"/{}\".format(do)\n", " for do in dont_copy]\n", "\n", " def visitor(k, item):\n", " \"\"\"\n", " \n", " \n", " :param k:\n", " :param item:\n", " \"\"\"\n", " if k not in dont_copy:\n", " if isinstance(item, h5py.Group):\n", " outfile.create_group(k)\n", " elif isinstance(item, h5py.Dataset):\n", " group = str(k).split(\"/\")\n", " group = \"/\".join(group[:-1])\n", " infile.copy(k, outfile[group])\n", " \n", " infile.visititems(visitor)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:10:55.917179Z", "start_time": "2018-12-06T16:09:01.603633Z" } }, "outputs": [], "source": [ "mean_im = None\n", "single_im = None\n", "mean_im_cc = None\n", "single_im_cc = None\n", "drift_lh = []\n", "drift_uh = []\n", "offsetMap = np.squeeze(offsetMap)\n", "noiseMap = np.squeeze(noiseMap)\n", "badPixelMap = np.squeeze(badPixelMap)\n", "if corr_bools.get('relgain'):\n", " #TODO: This should be removed after properly injecting gain const for all 3 gains\n", " if len(relGain.shape) == 3 and relGain.shape[2] == 1:\n", " # This is a temporary solution for the relGain of one gain in db\n", " relGain = np.repeat(relGain, 3, axis=2)\n", " relGain = np.squeeze(relGain)\n", "\n", "for k, f in enumerate(file_list):\n", " with h5py.File(f, 'r') as infile:\n", " out_fileb = \"{}/{}\".format(out_folder, f.split(\"/\")[-1])\n", " out_file = out_fileb.replace(\"RAW\", \"CORR\")\n", " #out_filed = out_fileb.replace(\"RAW\", \"CORR-SC\")\n", "\n", " data = None\n", " noise = None\n", " try:\n", " with h5py.File(out_file, \"w\") as ofile:\n", " \n", " copy_and_sanitize_non_cal_data(infile, ofile, h5path)\n", " data = infile[h5path+\"/pixels\"][()]\n", " nzidx = np.count_nonzero(data, axis=(1,2))\n", " data = data[nzidx != 0, ...]\n", " if limit_images > 0:\n", " data = data[:limit_images,...]\n", " oshape = data.shape\n", " data = np.moveaxis(data, 0, 2)\n", " ddset = ofile.create_dataset(h5path+\"/pixels\",\n", " oshape,\n", " chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", " dtype=np.float32)\n", " \n", " ddsetm = ofile.create_dataset(h5path+\"/mask\",\n", " oshape,\n", " chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", " dtype=np.uint32, compression=\"gzip\")\n", " \n", " ddsetg = ofile.create_dataset(h5path+\"/gain\",\n", " oshape,\n", " chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", " dtype=np.uint8, compression=\"gzip\")\n", " \n", " # Getting the 14th and 15th bit from the data, \n", " # which contains gain before removing them from the data.\n", " gain = np.right_shift(data, 14)\n", " \n", " # gains are stored as 00 for High gain, \n", " # 01 for Medium gain and 11 for low gain.\n", " # Hence, the subtraction from the \n", " # gain's int values to have 0, 1 and 2\n", " gain[gain != 0] -= 1\n", " \n", " fstride = 1\n", " if corr_bools.get('relgain'):\n", " if not flip_rgain: # rgain was taken during flipped orientation\n", " fstride = -1\n", " \n", " 
"                data = np.bitwise_and(data, 0b0011111111111111).astype(np.float32)\n", "\n", "                # create per-frame constant maps for the correction\n", "                omap = np.repeat(offsetMap[...,None,:], data.shape[2], axis=2)\n", "                nmap = np.repeat(noiseMap[...,None,:], data.shape[2], axis=2)\n", "                bmap = np.repeat(badPixelMap[...,None,:], data.shape[2], axis=2)\n", "\n", "                # select the constant value matching each pixel's gain\n", "                offset = np.choose(gain, (omap[...,0], omap[...,1], omap[...,2]))\n", "                noise = np.choose(gain, (nmap[...,0], nmap[...,1], nmap[...,2]))\n", "                bpix = np.choose(gain, (bmap[...,0], bmap[...,1], bmap[...,2]))\n", "\n", "                # same for relative gain if the correction is available\n", "                if corr_bools.get('relgain'):\n", "                    rmap = np.repeat(relGain[:,::fstride,None,:], data.shape[2], axis=2)\n", "                    rg = np.choose(gain, (rmap[...,0], rmap[...,1], rmap[...,2]))\n", "\n", "                # Apply offset correction\n", "                data -= offset\n", "\n", "                if corr_bools.get('relgain'):\n", "                    # Apply relative gain correction\n", "                    # TODO: check relgain correction in pydetlib\n", "                    # and use it here if the same.\n", "                    data *= rg\n", "\n", "                if corr_bools.get(\"offset_drift\"):\n", "                    # Account for offset drifts by tracking the signal\n", "                    # around the central hole of the FastCCD.\n", "                    lhd = np.mean(data[x//2-10:x//2,y//2-5:y//2+5,:], axis=(0,1))\n", "                    data[:x//2, :, :] -= lhd\n", "                    drift_lh.append(lhd)\n", "\n", "                    uhd = np.mean(data[x//2:x//2+10,y//2-5:y//2+5,:], axis=(0,1))\n", "                    data[x//2:, :, :] -= uhd\n", "                    drift_uh.append(uhd)\n", "\n", "                histCalOffsetCor.fill(data)\n", "\n", "                ddset[...] = np.moveaxis(data, 2, 0)\n", "                ddsetm[...] = np.moveaxis(bpix, 2, 0)\n", "                ddsetg[...] = np.moveaxis(gain, 2, 0).astype(np.uint8)\n", "\n", "                if mean_im is None:\n", "                    mean_im = np.nanmean(data, axis=2)\n", "                    single_im = data[...,0]\n", "\n", "                if corr_bools.get(\"pattern_class\"):\n", "\n", "                    ddsetcm = ofile.create_dataset(h5path+\"/pixels_cm\",\n", "                                                   oshape,\n", "                                                   chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", "                                                   dtype=np.float32)\n", "\n", "                    ddsetc = ofile.create_dataset(h5path+\"/pixels_classified\",\n", "                                                  oshape,\n", "                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", "                                                  dtype=np.float32, compression=\"gzip\")\n", "\n", "                    ddsetp = ofile.create_dataset(h5path+\"/patterns\",\n", "                                                  oshape,\n", "                                                  chunks=(chunk_size_idim, oshape[1], oshape[2]),\n", "                                                  dtype=np.int32, compression=\"gzip\")\n", "\n", "                    # set the noise maps used for the cluster-map calculation\n", "                    patternClassifierLH._noisemap = noise[:x//2, :, :]\n", "                    patternClassifierUH._noisemap = noise[x//2:, :, :]\n", "\n", "                    # common mode correction\n", "                    if corr_bools.get(\"common_mode\"):\n", "                        cellTable = np.zeros(data.shape[2], np.int32)\n", "                        data = cmCorrection.correct(data.astype(np.float32),\n", "                                                    cellTable=cellTable,\n", "                                                    noiseMap=noise)\n", "\n", "                    # store the (optionally common-mode corrected) data\n", "                    ddsetcm[...] = np.moveaxis(data, 2, 0)\n", "\n", "                    histCalCommonModeCor.fill(data)\n", "\n", "                    dataLH = data[:x//2, :, :]\n", "                    dataUH = data[x//2:, :, :]\n", "\n", "                    dataLH, patternsLH = patternClassifierLH.classify(dataLH)\n", "                    dataUH, patternsUH = patternClassifierUH.classify(dataUH)\n", "\n", "                    data[:x//2, :, :] = dataLH\n", "                    data[x//2:, :, :] = dataUH\n", "\n", "                    patterns = np.zeros(data.shape, patternsLH.dtype)\n", "                    patterns[:x//2, :, :] = patternsLH\n", "                    patterns[x//2:, :, :] = patternsUH\n", "\n", "                    data[data < split_evt_primary_threshold*noise] = 0\n", "                    ddsetc[...] = np.moveaxis(data, 2, 0)\n", "                    ddsetp[...] = np.moveaxis(patterns, 2, 0)\n", "\n", "                    histCalPcorr.fill(data)\n",
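"                    # Pattern value 100 marks single-pixel hits; only these enter the singles spectrum.\n",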
"                    data[patterns != 100] = np.nan\n", "                    histCalPcorrS.fill(data)\n", "\n", "                    if mean_im_cc is None:\n", "                        mean_im_cc = np.nanmean(data, axis=2)\n", "                        single_im_cc = data[...,0]\n", "\n", "    except Exception as e:\n", "        print(\"Couldn't calibrate data in {}: {}\".format(f, e))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:10:56.094985Z", "start_time": "2018-12-06T16:10:55.918900Z" } }, "outputs": [], "source": [ "if corr_bools.get(\"offset_drift\"):\n", "    lhds = np.concatenate(drift_lh)\n", "    uhds = np.concatenate(drift_uh)\n", "    fig = plt.figure(figsize=(10,5))\n", "    ax = fig.add_subplot(111)\n", "    ax.plot(lhds, label=\"Lower hem.\")\n", "    ax.plot(uhds, label=\"Upper hem.\")\n", "    ax.set_xlabel(\"Frame #\")\n", "    ax.set_ylabel(\"Offset drift (ADU)\")\n", "    ax.legend()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:10:56.126409Z", "start_time": "2018-12-06T16:10:56.096242Z" } }, "outputs": [], "source": [ "if corr_bools.get(\"pattern_class\"):\n", "    print(\"******************LOWER HEMISPHERE******************\\n\")\n", "\n", "    patternStatsLH = patternClassifierLH.getPatternStats()\n", "    fig = plt.figure(figsize=(15,15))\n", "    ax = fig.add_subplot(4,4,1)\n", "    sfields = [\"singles\", \"first singles\", \"clusters\"]\n", "    mfields = [\"doubles\", \"triples\", \"quads\"]\n", "    relativeOccurances = []\n", "    labels = []\n", "    for i, f in enumerate(sfields):\n", "        relativeOccurances.append(patternStatsLH[f])\n", "        labels.append(f)\n", "    for i, f in enumerate(mfields):\n", "        for k in range(len(patternStatsLH[f])):\n", "            relativeOccurances.append(patternStatsLH[f][k])\n", "            labels.append(f+\"(\"+str(k)+\")\")\n", "    relativeOccurances = np.array(relativeOccurances, float)\n", "    relativeOccurances /= np.sum(relativeOccurances)\n", "    pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)\n", "    ax.set_title(\"Pattern occurrence\")\n", "    # Set aspect ratio to be equal so that pie is drawn as a circle.\n", "    a = ax.axis('equal')\n", "\n", "    smaps = [\"singlemap\", \"firstsinglemap\", \"clustermap\"]\n", "    for i, m in enumerate(smaps):\n", "\n", "        ax = fig.add_subplot(4,4,2+i)\n", "\n", "        pmap = ax.imshow(patternStatsLH[m], interpolation=\"nearest\", vmax=2*np.nanmedian(patternStatsLH[m]))\n", "        ax.set_title(m)\n", "        cb = fig.colorbar(pmap)\n", "\n", "    mmaps = [\"doublemap\", \"triplemap\", \"quadmap\"]\n", "    k = 0\n", "    for i, m in enumerate(mmaps):\n", "\n", "        for j in range(4):\n", "            ax = fig.add_subplot(4,4,2+len(smaps)+k)\n", "            pmap = ax.imshow(patternStatsLH[m][j], interpolation=\"nearest\", vmax=2*np.median(patternStatsLH[m][j]))\n", "            ax.set_title(m+\"(\"+str(j)+\")\")\n", "            cb = fig.colorbar(pmap)\n", "            k+=1" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:10:56.176160Z", "start_time": "2018-12-06T16:10:56.127853Z" } }, "outputs": [], "source": [ "if corr_bools.get(\"pattern_class\"):\n", "    print(\"******************UPPER HEMISPHERE******************\\n\")\n", "\n", "    patternStatsUH = patternClassifierUH.getPatternStats()\n", "    fig = plt.figure(figsize=(15,15))\n", "    ax = fig.add_subplot(4,4,1)\n", "    sfields = [\"singles\", \"first singles\", \"clusters\"]\n", "    mfields = [\"doubles\", \"triples\", \"quads\"]\n", "    relativeOccurances = []\n", "    labels = []\n", "    for i, f in enumerate(sfields):\n",
"        relativeOccurances.append(patternStatsUH[f])\n", "        labels.append(f)\n", "    for i, f in enumerate(mfields):\n", "        for k in range(len(patternStatsUH[f])):\n", "            relativeOccurances.append(patternStatsUH[f][k])\n", "            labels.append(f+\"(\"+str(k)+\")\")\n", "    relativeOccurances = np.array(relativeOccurances, float)\n", "    relativeOccurances /= np.sum(relativeOccurances)\n", "    pie = ax.pie(relativeOccurances, labels=labels, autopct='%1.1f%%', shadow=True)\n", "    ax.set_title(\"Pattern occurrence\")\n", "    # Set aspect ratio to be equal so that pie is drawn as a circle.\n", "    a = ax.axis('equal')\n", "\n", "    smaps = [\"singlemap\", \"firstsinglemap\", \"clustermap\"]\n", "    for i, m in enumerate(smaps):\n", "\n", "        ax = fig.add_subplot(4,4,2+i)\n", "\n", "        pmap = ax.imshow(patternStatsUH[m], interpolation=\"nearest\", vmax=2*np.nanmedian(patternStatsUH[m]))\n", "        ax.set_title(m)\n", "        cb = fig.colorbar(pmap)\n", "\n", "    mmaps = [\"doublemap\", \"triplemap\", \"quadmap\"]\n", "    k = 0\n", "    for i, m in enumerate(mmaps):\n", "\n", "        for j in range(4):\n", "            ax = fig.add_subplot(4,4,2+len(smaps)+k)\n", "            pmap = ax.imshow(patternStatsUH[m][j], interpolation=\"nearest\", vmax=2*np.median(patternStatsUH[m][j]))\n", "            ax.set_title(m+\"(\"+str(j)+\")\")\n", "            cb = fig.colorbar(pmap)\n", "            k+=1" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:10:56.190150Z", "start_time": "2018-12-06T16:10:56.177570Z" } }, "outputs": [], "source": [ "if corr_bools.get(\"pattern_class\"):\n", "    t0 = PrettyTable()\n", "    t0.title = \"Total number of counts after all corrections\"\n", "    t0.field_names = [\"Hemisphere\", \"Singles\", \"First-singles\", \"Clusters\"]\n", "    t0.add_row([\"LH\", patternStatsLH['singles'], patternStatsLH['first singles'], patternStatsLH['clusters']])\n", "    t0.add_row([\"UH\", patternStatsUH['singles'], patternStatsUH['first singles'], patternStatsUH['clusters']])\n", "\n", "    print(t0)\n", "\n", "    t1 = PrettyTable()\n", "\n", "    t1.field_names = [\"Index\", \"D-LH\", \"D-UH\", \"T-LH\", \"T-UH\", \"Q-LH\", \"Q-UH\"]\n", "\n", "    t1.add_row([0, patternStatsLH['doubles'][0], patternStatsUH['doubles'][0], patternStatsLH['triples'][0], patternStatsUH['triples'][0], patternStatsLH['quads'][0], patternStatsUH['quads'][0]])\n", "    t1.add_row([1, patternStatsLH['doubles'][1], patternStatsUH['doubles'][1], patternStatsLH['triples'][1], patternStatsUH['triples'][1], patternStatsLH['quads'][1], patternStatsUH['quads'][1]])\n", "    t1.add_row([2, patternStatsLH['doubles'][2], patternStatsUH['doubles'][2], patternStatsLH['triples'][2], patternStatsUH['triples'][2], patternStatsLH['quads'][2], patternStatsUH['quads'][2]])\n", "    t1.add_row([3, patternStatsLH['doubles'][3], patternStatsUH['doubles'][3], patternStatsLH['triples'][3], patternStatsUH['triples'][3], patternStatsLH['quads'][3], patternStatsUH['quads'][3]])\n", "\n", "    print(t1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:10:56.203219Z", "start_time": "2018-12-06T16:10:56.191509Z" } }, "outputs": [], "source": [ "if corr_bools.get(\"pattern_class\"):\n", "    doublesLH = patternStatsLH['doubles'][0] + patternStatsLH['doubles'][1] + patternStatsLH['doubles'][2] + patternStatsLH['doubles'][3]\n", "    triplesLH = patternStatsLH['triples'][0] + patternStatsLH['triples'][1] + patternStatsLH['triples'][2] + patternStatsLH['triples'][3]\n", "    quadsLH = patternStatsLH['quads'][0] + patternStatsLH['quads'][1] + patternStatsLH['quads'][2] + patternStatsLH['quads'][3]\n",
"    allsinglesLH = patternStatsLH['singles'] + patternStatsLH['first singles']\n", "    eventsLH = allsinglesLH + doublesLH + triplesLH + quadsLH\n", "\n", "    doublesUH = patternStatsUH['doubles'][0] + patternStatsUH['doubles'][1] + patternStatsUH['doubles'][2] + patternStatsUH['doubles'][3]\n", "    triplesUH = patternStatsUH['triples'][0] + patternStatsUH['triples'][1] + patternStatsUH['triples'][2] + patternStatsUH['triples'][3]\n", "    quadsUH = patternStatsUH['quads'][0] + patternStatsUH['quads'][1] + patternStatsUH['quads'][2] + patternStatsUH['quads'][3]\n", "    allsinglesUH = patternStatsUH['singles'] + patternStatsUH['first singles']\n", "    eventsUH = allsinglesUH + doublesUH + triplesUH + quadsUH\n", "\n", "    reloccurLH = np.array([allsinglesLH/eventsLH, doublesLH/eventsLH, triplesLH/eventsLH, quadsLH/eventsLH])\n", "    reloccurUH = np.array([allsinglesUH/eventsUH, doublesUH/eventsUH, triplesUH/eventsUH, quadsUH/eventsUH])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:10:56.212586Z", "start_time": "2018-12-06T16:10:56.204731Z" } }, "outputs": [], "source": [ "if corr_bools.get(\"pattern_class\"):\n", "    fig = plt.figure(figsize=(10,5))\n", "    ax = fig.add_subplot(1,2,1)\n", "    labels = ['singles', 'doubles', 'triples', 'quads']\n", "    pie = ax.pie(reloccurLH, labels=labels, autopct='%1.1f%%', shadow=True)\n", "    ax.set_title(\"Pattern occurrence LH\")\n", "    # Set aspect ratio to be equal so that pie is drawn as a circle.\n", "    a = ax.axis('equal')\n", "    ax = fig.add_subplot(1,2,2)\n", "    pie = ax.pie(reloccurUH, labels=labels, autopct='%1.1f%%', shadow=True)\n", "    ax.set_title(\"Pattern occurrence UH\")\n", "    # Set aspect ratio to be equal so that pie is drawn as a circle.\n", "    a = ax.axis('equal')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:13:12.889583Z", "start_time": "2018-12-06T16:13:11.122653Z" } }, "outputs": [], "source": [ "ho,eo,co,so = histCalOffsetCor.get()\n", "\n", "d = [{'x': co,\n", "      'y': ho,\n", "      'y_err': np.sqrt(ho[:]),\n", "      'drawstyle': 'steps-mid',\n", "      'errorstyle': 'bars',\n", "      'errorcoarsing': 2,\n", "      'label': 'Offset corr.'\n", "      },\n", "     ]\n", "\n", "if corr_bools.get(\"pattern_class\") and corr_bools.get(\"common_mode\"):\n", "    hcm,ecm,ccm,scm = histCalCommonModeCor.get()\n", "    d.append({'x': ccm,\n", "              'y': hcm,\n", "              'y_err': np.sqrt(hcm[:]),\n", "              'drawstyle': 'steps-mid',\n", "              'errorstyle': 'bars',\n", "              'errorcoarsing': 2,\n", "              'label': 'CommonMode corr.'\n", "              })\n", "\n", "fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU)',\n", "                      y_label='Number of occurrences', figsize='2col',\n", "                      y_log=True, x_range=(-50,500),\n", "                      legend='top-center-frame-2col')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:12:57.289742Z", "start_time": "2018-12-06T16:12:45.529734Z" } }, "outputs": [], "source": [ "if corr_bools.get(\"pattern_class\"):\n", "    h1,e1L,c1L,s1L = histCalPcorr.get()\n", "    h1s,e1Ls,c1Ls,s1Ls = histCalPcorrS.get()\n", "\n", "    d = [\n", "        {'x': c1L,\n", "         'y': h1,\n", "         'y_err': np.sqrt(h1[:]),\n", "         'drawstyle': 'steps-mid',\n", "         'label': 'Split event corrected'},\n", "        {'x': c1Ls,\n", "         'y': h1s,\n", "         'y_err': np.sqrt(h1s[:]),\n", "         'drawstyle': 'steps-mid',\n", "         'label': 'Single pixel hits'}\n", "    ]\n", "\n", "    fig = xana.simplePlot(d, aspect=1, x_label='Energy (ADU)',\n", "                          y_label='Number of occurrences', figsize='2col',\n",
"                          y_log=True, x_range=(0,200), x_log=False,\n", "                          legend='top-center-frame-2col')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Mean Image of first Sequence ##" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:11:08.317130Z", "start_time": "2018-12-06T16:11:05.788655Z" } }, "outputs": [], "source": [ "fig = xana.heatmapPlot(mean_im,\n", "                       x_label='Columns', y_label='Rows',\n", "                       lut_label='Signal (ADU)',\n", "                       x_range=(0,y),\n", "                       y_range=(0,x), vmin=-50, vmax=500)\n", "\n", "if corr_bools.get(\"pattern_class\"):\n", "    fig = xana.heatmapPlot(mean_im_cc,\n", "                           x_label='Columns', y_label='Rows',\n", "                           lut_label='Signal (ADU)',\n", "                           x_range=(0,y),\n", "                           y_range=(0,x), vmin=-50, vmax=500)" ] }, { "cell_type": "markdown", "metadata": { "collapsed": true }, "source": [ "## Single Shot of first Sequence ##" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2018-12-06T16:11:10.908912Z", "start_time": "2018-12-06T16:11:08.318486Z" } }, "outputs": [], "source": [ "fig = xana.heatmapPlot(single_im,\n", "                       x_label='Columns', y_label='Rows',\n", "                       lut_label='Signal (ADU)',\n", "                       x_range=(0,y),\n", "                       y_range=(0,x), vmin=-50, vmax=500)\n", "\n", "if corr_bools.get(\"pattern_class\"):\n", "    fig = xana.heatmapPlot(single_im_cc,\n", "                           x_label='Columns', y_label='Rows',\n", "                           lut_label='Signal (ADU)',\n", "                           x_range=(0,y),\n", "                           y_range=(0,x), vmin=-50, vmax=500)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" }, "latex_envs": { "LaTeX_envs_menu_present": true, "autocomplete": true, "bibliofile": "biblio.bib", "cite_by": "apalike", "current_citInitial": 1, "eqLabelWithNumbers": true, "eqNumInitial": 1, "hotkeys": { "equation": "Ctrl-E", "itemize": "Ctrl-I" }, "labels_anchors": false, "latex_user_defs": false, "report_style_numbering": false, "user_envs_cfg": false } }, "nbformat": 4, "nbformat_minor": 1 }