diff --git a/cfel_cxi.py b/cfel_cxi.py
deleted file mode 100644
index 99d36b489c4a10876c27f468ff76995c14b0358c..0000000000000000000000000000000000000000
--- a/cfel_cxi.py
+++ /dev/null
@@ -1,575 +0,0 @@
-#    This file is part of cfelpyutils.
-#
-#    cfelpyutils is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    cfelpyutils is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
-"""
-Utilities for writing multi-event files in the CXIDB format.
-
-This module contains utilities to write files that adhere to the CXIDB file format:
-
-http://www.cxidb.org/cxi.html .
-"""
-
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
-from collections import namedtuple
-
-import h5py
-import numpy
-
-
-_CXISimpleEntry = namedtuple('SimpleEntry', ['path', 'data', 'overwrite'])
-
-
-def _assign_data_type(data):
-    if isinstance(data, numpy.ndarray):
-        data_type = data.dtype
-    elif isinstance(data, bytes):
-        data_type = numpy.dtype('S256')
-    else:
-        data_type = type(data)
-
-    return data_type
-
-
-class _Stack:
-    def __init__(self, path, data, axes, compression, chunk_size):
-
-        self._data_type = _assign_data_type(data)
-
-        if isinstance(data, (bytes, int, float)):
-            self._data_shape = ()
-        else:
-            self._data_shape = data.shape
-
-        self._data_to_write = data
-        self.path = path
-        self._axes = axes
-        self._compression = compression
-
-        if chunk_size is None:
-            self._chunk_size = (1,) + self._data_shape
-        else:
-            self._chunk_size = chunk_size
-
-    def is_there_data_to_write(self):
-        return self._data_to_write is not None
-
-    def write_initial_slice(self, file_handle, max_num_slices):
-
-        file_handle.create_dataset(self.path, shape=(max_num_slices,) + self._data_shape,
-                                   dtype=self._data_type,
-                                   maxshape=(max_num_slices,) + self._data_shape,
-                                   compression=self._compression, chunks=self._chunk_size)
-
-        dataset = file_handle[self.path]
-        dataset[0] = self._data_to_write
-
-        if self._axes is not None:
-            file_handle[self.path].attrs['axes'] = self._axes
-
-        self._data_to_write = None
-
-    def write_slice(self, file_handle, curr_slice):
-
-        file_handle[self.path][curr_slice] = self._data_to_write
-
-        self._data_to_write = None
-
-    def append_data(self, data):
-
-        if self._data_to_write is not None:
-            raise RuntimeError('Cannot append data to the stack entry at {}. The previous slice has not been written '
-                               'yet.'.format(self.path))
-
-        data_type = _assign_data_type(data)
-
-        if data_type != self._data_type:
-            raise RuntimeError('The type of the input data does not match what is already present in the stack.')
-
-        if isinstance(data, (bytes, int, float)):
-            curr_data_shape = ()
-        else:
-            curr_data_shape = data.shape
-
-        if curr_data_shape != self._data_shape:
-            raise RuntimeError('The shape of the input data does not match what is already present in the stack.')
-
-        self._data_to_write = data
-
-    def finalize(self, file_handle, curr_slice):
-
-        if self._data_to_write is not None:
-            raise RuntimeError('Cannot finalize the stack at {}, there is data waiting to be '
-                               'written.'.format(self.path))
-
-        final_size = curr_slice
-
-        file_handle[self.path].resize((final_size,) + self._data_shape)
-
-
-def _validate_data(data):
-    if not isinstance(data, (bytes, int, float, numpy.ndarray)):
-        raise RuntimeError('The CXI Writer only accepts numpy objects, numbers and ascii strings.')
-
-
-class CXIWriter:
-    """Writing of multi-event CXIDB files.
-
-    Implements a simple low-level CXIDB file format writer for multi-event files. It allows the user to write data
-    "stacks" in the CXIDB files, making sure that the entries in all stacks are synchronized.
-
-    A CXI Writer instance manages one file. A user can add a stack to a CXI Writer instance with the
-    add_stack_to_writer function, which also writes the first entry in the stack. The user can then add to the writer
-    all the stacks that the file should contain. Once all stacks are added, the user initializes them with the
-    initialize_stacks function. After initialization, no more stacks can be added. Instead, entries can be appended to
-    the existing stacks, using the append_data_to_stack function.
-
-    A "slice" (a set of synced entries in all the stacks in the file) can be written to the a file only after an entry
-    has been appended to all stacks in the file. Conversely, after an entry has been appended to a stack, the user
-    cannot append another entry before a slice is written. This ensures synchronization of the data in all the stacks.
-
-    A file can be closed at any time. In any case, the writer will not allow a file to contain more than the
-    max_num_slices specified during instantiation.
-
-    Simple non-stack entries can be written to the file at any time, before or after stack initialization (provided of
-    course that the file is open). Entries and stacks will generally never be overwritten unless the overwrite parameter
-    is set to True.
-
-    Example of usage of the stack API:
-
-    c1 = 0
-    c2 = 0
-
-    f1 = CXIWriter('test1.h5', )
-    f2 = CXIWriter('test2.h5', )
-
-    f1.add_stack_to_writer('detector1', '/entry_1/detector_1/data', numpy.random.rand(2, 2),
-                           'frame:y:x')
-    f2.add_stack_to_writer('detector2', '/entry_1/detector_1/data', numpy.random.rand(3, 2),
-                           'frame:y:x', compression=False, chunk_size=(1,3,2))
-
-    f1.add_stack_to_writer('counter1', '/entry_1/detector_1/count', c1)
-    f2.add_stack_to_writer('counter2', '/entry_1/detector_1/count', c2)
-
-    f1.write_simple_entry('detectorname1', '/entry_1/detector_1/name', 'FrontCSPAD')
-    f2.write_simple_entry('detectorname2', '/entry_1/detector_1/name', 'BackCSPAD')
-
-    f1.initialize_stacks()
-    f2.initialize_stacks()
-
-    a = numpy.random.rand(2, 2)
-    b = numpy.random.rand(3, 2)
-
-    c1 += 1
-    c2 += 2
-
-    f1.append_data_to_stack('detector1', a)
-    f2.append_data_to_stack('detector2', b)
-
-    f1.append_data_to_stack('counter1', c1)
-    f2.append_data_to_stack('counter2', c2)
-
-    f1.write_stack_slice_and_increment()
-    f2.write_stack_slice_and_increment()
-
-    f1.create_link('detectorname1', '/name')
-    f2.create_link('detectorname2', '/name')
-
-    f1.close_file()
-    f2.close_file()
-    """
-
-    def __init__(self, filename, max_num_slices=5000):
-        """Instantiates a CXI Writer, managing one file.
-
-        Instantiates a CXI Writer, responsible for writing data into one file.
-
-        Args:
-
-            filename (str): name of the file managed by the CXI Writer
-
-            max_num_slices (int): maximum number of slices for the stacks in the file (default 5000)
-        """
-
-        self._cxi_stacks = {}
-        self._pending_simple_entries = []
-        self._simple_entries = {}
-        self._curr_slice = 0
-        self._max_num_slices = max_num_slices
-        self._file_is_open = False
-        self._initialized = False
-
-        try:
-            self._fh = h5py.File(filename, 'w')
-            self._file_is_open = True
-
-        except OSError:
-            raise RuntimeError('Error opening the cxi file: {}'.format(filename))
-
-    def _write_simple_entry(self, entry):
-
-        if entry.path in self._fh:
-            if entry.overwrite is True:
-                del self._fh[entry.path]
-            else:
-                raise RuntimeError('Cannot write the entry. Data is already present at the specified path.')
-
-        self._fh.create_dataset(entry.path, data=entry.data)
-
-    def add_stack_to_writer(self, name, path, initial_data, axes=None, compression=True, chunk_size=None,
-                            overwrite=True):
-        """Adds a new stack to the file.
-
-        Adds a new stack to the CXI Writer instance. The user must provide a name for the stack, which will identify
-        the stack in all subsequent operations. The user must also provide the data that will be written as the
-        initial entry in the stack (initial_data). This initial entry is used to set the size and type of data that the
-        stack will hold, and these parameters are in turn used to validate all data that is subsequently appended to
-        the stack.
-
-        Args:
-
-            name (str): stack name.
-
-            path (str): path in the hdf5 file where the stack will be written.
-
-            initial_data (Union[numpy.ndarray, bytes, int, float]): initial entry in the stack. It gets written to the
-            stack as slice 0. Its characteristics are used to validate all data subsequently appended to the stack.
-
-            axes (bytes): the 'axes' attribute for the stack, as defined by the CXIDB file format.
-
-            compression (Union[None, bool, str]): compression parameter for the stack. This parameter works in the same
-            way as the compression parameter in h5py. The default value is True.
-
-            chunk_size (Union[None, tuple]): HDF5 chunk size for the stack. If this parameter is set to None, the
-            CXI writer will compute a chunk size automatically (this is the default behavior). Otherwise, the writer
-            will use the provided tuple to set the chunk size.
-
-            overwrite (bool): if set to True, a stack already existing at the same location will be overwritten. If set
-            to False, an attempt to overwrite a stack will raise an error.
-        """
-
-        _validate_data(initial_data)
-
-        if name in self._cxi_stacks:
-            raise RuntimeError('A stack with the provided name already exists.')
-
-        if self._initialized is True:
-            raise RuntimeError('Adding stacks to the writer is not possible after initialization.')
-
-        for entry in self._cxi_stacks:
-            if path == self._cxi_stacks[entry].path:
-                if overwrite is True:
-                    del self._cxi_stacks[entry]
-                else:
-                    raise RuntimeError('Cannot write the entry. Data is already present at the specified path.')
-
-        new_stack = _Stack(path, initial_data, axes, compression, chunk_size)
-        self._cxi_stacks[name] = new_stack
-
-    def write_simple_entry(self, name, path, data, overwrite=False):
-        """Writes a simple, non-stack entry in the file.
-
-        Writes a simple, non-stack entry in the file, at the specified path. A simple entry can be written at any time,
-        before or after the stack initialization. The user must provide a name that identifies the entry for further
-        operations (for example, creating a link).
-
-        Args:
-
-            name (str): entry name
-
-            path (str): path in the hdf5 file where the entry will be written.
-
-            data (Union[numpy.ndarray, bytes, int, float]): data to write
-
-            overwrite (bool): if set to True, an entry already existing at the same location will be overwritten. If set
-            to False, an attempt to overwrite an entry will raise an error.
-        """
-
-        _validate_data(data)
-
-        if name in self._simple_entries:
-            raise RuntimeError('An entry with the provided name already exists.')
-
-        if path in self._fh:
-            if overwrite is True:
-                del self._fh[path]
-            else:
-                raise RuntimeError('Cannot create the entry. An entry already exists at the specified path.')
-
-        new_entry = _CXISimpleEntry(path, data, overwrite)
-
-        if self._initialized is not True:
-            self._pending_simple_entries.append(new_entry)
-        else:
-            self._write_simple_entry(new_entry)
-
-        self._simple_entries[name] = new_entry
-
-    def create_link(self, name, path, overwrite=False):
-        """Creates a link to a stack or entry.
-
-        Creates a link in the file, at the path specified, pointing to the stack or the entry identified by the
-        provided name. If a link or entry already exists at the specified path, it is deleted and replaced only if the
-        value of the overwrite parameter is True.
-
-        Args:
-
-            name (str): name of the stack or entry to which the link points.
-
-            path (str): path in the hdf5 where the link is created.
-
-            overwrite (bool): if set to True, an entry already existing at the same location will be overwritten. If set
-            to False, an attempt to overwrite an entry will raise an error.
-        """
-
-        if path in self._fh:
-            if overwrite is True:
-                del self._fh[path]
-            else:
-                raise RuntimeError('Cannot create the link. An entry already exists at the specified path.')
-
-        try:
-            link_target = self._fh[self._cxi_stacks[name].path]
-        except KeyError:
-            try:
-                link_target = self._fh[self._simple_entries[name].path]
-            except KeyError:
-                raise RuntimeError('Cannot find an entry or stack with the provided name.')
-
-        self._fh[path] = link_target
-
-    def create_link_to_group(self, group, path, overwrite=False):
-        """Creates a link to an HDF5 group.
-
-        Creates a link to an HDF5 group (as opposed to a simple entry or stack). If a link or entry already exists at
-        the specified path, it is deleted and replaced only if the value of the overwrite parameter is True.
-
-        Args:
-
-            group (str): internal HDF5 path of the group to which the link points.
-
-            path (str): path in the hdf5 where the link is created.
-
-            overwrite (bool): if set to True, an entry already existing at the same location will be overwritten. If set
-            to False, an attempt to overwrite an entry will raise an error.
-        """
-
-        if path in self._fh:
-            if overwrite is True:
-                del self._fh[path]
-            else:
-                raise RuntimeError('Cannot create the link. An entry already exists at the specified path.')
-
-        try:
-            link_target = self._fh[group]
-        except KeyError:
-            raise RuntimeError('Cannot create the link. The group to which the link points does not exist.')
-
-        self._fh[path] = link_target
-
-    def create_softlink_to_group(self, group, path, overwrite=False):
-        """Creates a link to an HDF5 group.
-        
-        Creates a soft link to an HDF5 group (as opposed to a simple entry or stack). If a link or entry already exists at
-        the specified path, it is deleted and replaced only if the value of the overwrite parameter is True.
-
-        Args: 
-
-            group (str): internal HDF5 path of the group to which the link points.
-        
-            path (str): path in the hdf5 where the link is created.
-             
-            overwrite (bool): if set to True, an entry already existing at the same location will be overwritten. If set
-            to False, an attempt to overwrite an entry will raise an error.
-        """
-
-        if path in self._fh:
-            if overwrite is True:
-                del self._fh[path]
-            else:
-                raise RuntimeError('Cannot create the link. An entry already exists at the specified path.')
-
-        self._fh[path] = h5py.SoftLink(group)
-
-    def initialize_stacks(self):
-        """Initializes the stacks.
-
-        Initializes the stacks in the CXI Writer instance. This fixes the number and type of stacks in the file. No
-        stacks can be added to the CXI Writer after initialization.
-        """
-
-        if self._file_is_open is not True:
-            raise RuntimeError('The file is closed. Cannot initialize the file.')
-
-        if self._initialized is True:
-            raise RuntimeError('The file is already initialized. Cannot initialize file.')
-
-        for entry in self._cxi_stacks.values():
-            entry.write_initial_slice(self._fh, self._max_num_slices)
-
-        self._curr_slice += 1
-
-        for entry in self._pending_simple_entries:
-            self._write_simple_entry(entry)
-        self._pending_simple_entries = []
-
-        self._initialized = True
-
-    def append_data_to_stack(self, name, data):
-        """Appends data to a stack.
-
-        Appends data to a stack, validating the data to make sure that the data type and size match the previous entries
-        in the stack. Only one entry can be appended to each stack before writing a slice across all stacks with
-        the write_stack_slice_and_increment function.
-
-        Args:
-
-            name (str): stack name, defining the stack to which the data will be appended.
-
-            data (Union[numpy.ndarray, bytes, int, float]): data to write. The data will be validated against the type
-            and size of previous entries in the stack.
-        """
-
-        _validate_data(data)
-
-        if self._initialized is False:
-            raise RuntimeError('Cannot append to a stack before initialization of the file.')
-
-        if name not in self._cxi_stacks:
-            raise RuntimeError('Cannot append to stack {}. The stack does not exist.'.format(name))
-
-        try:
-            self._cxi_stacks[name].append_data(data)
-        except RuntimeError as e:
-            raise RuntimeError('Error appending to stack {}: {}'.format(name, e))
-
-    def write_stack_slice_and_increment(self):
-        """Writes a slice across all stacks and resets the writer for the next slice.
-
-        Writes a slice across all stacks in the file. It checks that an entry has been appended to each stack, and
-        writes all the entries on top of the relevant stacks in one go. If an entry is missing in a stack, the function
-        will raise an error. After writing the slice, the function resets the writer to allow again appending data to
-        the stacks.
-        """
-
-        if self._file_is_open is not True:
-            raise RuntimeError('The file is closed. The slice cannot be written.')
-
-        if self._initialized is False:
-            raise RuntimeError('Cannot write slice. The file is not initialized.')
-
-        if self._curr_slice >= self._max_num_slices:
-            raise RuntimeError('The file already holds the maximum allowed number of slices, and should be closed.')
-
-        for entry in self._cxi_stacks.values():
-            if not entry.is_there_data_to_write():
-                raise RuntimeError('The slice is incomplete and will not be written. The following stack is not '
-                                   'present in the current slice: {}'.format(entry.path))
-
-        for entry in self._cxi_stacks.values():
-            entry.write_slice(self._fh, self._curr_slice)
-
-        self._curr_slice += 1
-
-    def get_file_handle(self):
-        """Access to the naked h5py file handle.
-
-        This function allows access to the naked h5py handle for the file managed by the CXI Writer. This allows
-        operations on the file that are not covered by the CXI Writer API. Use it at your own risk.
-
-        Returns:
-
-            fh (h5py.File): an h5py file handle to the file managed by the writer.
-        """
-
-        if self._file_is_open is not True:
-            raise RuntimeError('The file is closed. Cannot get the file handle.')
-
-        return self._fh
-
-    def stacks_are_initialized(self):
-        """Checks if stacks are initialized.
-
-        Checks the status of the stacks in the file and returns the status to the user.
-
-        Returns:
-
-            status (bool): True if the stacks are initialized, False otherwise
-        """
-
-        return self._initialized
-
-    def is_entry_in_file(self, path):
-        """Checks if an entry is already present in the file.
-
-        Checks if an entry is already present in the file at the path provided by the user. It will return True if
-        either a dataset or a group is present at the specified path.
-
-        Args:
-
-            path (str): the path at which to check for a dataset or group.
-
-        Returns:
-
-            ret (bool): True if a group or dataset can be found in the file, False otherwise
-
-        """
-
-        return path in self._fh
-
-    def file_is_full(self):
-        """Checks if the file is full.
-
-        Checks if the file is full (i.e. the maximum number of slices has already been written), and returns the
-        information to the user.
-
-        Returns:
-
-            status (bool): True if the file is full, False otherwise.
-        """
-
-        return self._curr_slice >= self._max_num_slices
-
-    def num_slices_in_file(self):
-        """Returns the number of slices already written in the file
-
-        Returns the number of slices that have already been written in the file.
-
-        Returns:
-
-            num_slices (int): the number of slices written so far.
-        """
-
-        num_slices = self._curr_slice - 1
-
-        return num_slices
-
-    def close_file(self):
-        """Closes the file.
-
-        Closes the file for writing, ending all writing operations.
-        """
-
-        if self._file_is_open is not True:
-            raise RuntimeError('The file is already closed. Cannot close the file.')
-
-        for entry in self._cxi_stacks.values():
-            entry.finalize(self._fh, self._curr_slice)
-
-        self._fh.close()
-
-        self._file_is_open = False
diff --git a/cfel_fabio.py b/cfel_fabio.py
deleted file mode 100644
index eac468bccf0c46d75a580ddf2b9ee17c1b6d1ad2..0000000000000000000000000000000000000000
--- a/cfel_fabio.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#    This file is part of cfelpyutils.
-#
-#    cfelpyutils is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    cfelpyutils is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
-"""
-Utilities based on the fabio python module.
-
-This module contains utilities based on the fabio python module.
-"""
-
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
-import logging
-
-import fabio.cbfimage
-import numpy
-
-logger = logging.getLogger(__name__)
-
-
-def read_cbf_from_stream(stream):
-    """Reads a cbfimage object out of a data string buffer.
-
-    Read a data string buffer received as a payload from the PETRAIII P11 sender, and creates a cbfimage object from
-    it (See the documentation of the fabio python module).
-
-    Args:
-
-       stream (str): a data string buffer received from the PETRAIII P11 sender.
-
-    Returns:
-
-        cbf_obj (fabio.cbfimage): a cbfimage object containing the data extracted
-            from the string buffer.
-    """
-
-    cbf_obj = fabio.cbfimage.cbfimage()
-
-    cbf_obj.header = {}
-    cbf_obj.resetvals()
-
-    infile = stream
-    cbf_obj._readheader(infile)
-    if cbf_obj.CIF_BINARY_BLOCK_KEY not in cbf_obj.cif:
-        err = "Not key %s in CIF, no CBF image in stream" % fabio.cbfobj.CIF_BINARY_BLOCK_KEY
-        logger.error(err)
-        for kv in cbf_obj.cif.items():
-            print("%s: %s" % kv)
-        raise RuntimeError(err)
-    if cbf_obj.cif[cbf_obj.CIF_BINARY_BLOCK_KEY] == "CIF Binary Section":
-        cbf_obj.cbs += infile.read(len(cbf_obj.STARTER) + int(cbf_obj.header["X-Binary-Size"]) -
-                                   len(cbf_obj.cbs) + cbf_obj.start_binary)
-    else:
-        if len(cbf_obj.cif[cbf_obj.CIF_BINARY_BLOCK_KEY]) > int(
-                cbf_obj.header["X-Binary-Size"]) + cbf_obj.start_binary + len(cbf_obj.STARTER):
-            cbf_obj.cbs = cbf_obj.cif[cbf_obj.CIF_BINARY_BLOCK_KEY][:int(cbf_obj.header["X-Binary-Size"]) +
-                                                                    cbf_obj.start_binary +
-                                                                    len(cbf_obj.STARTER)]
-        else:
-            cbf_obj.cbs = cbf_obj.cif[cbf_obj.CIF_BINARY_BLOCK_KEY]
-    binary_data = cbf_obj.cbs[cbf_obj.start_binary + len(cbf_obj.STARTER):]
-
-    if "Content-MD5" in cbf_obj.header:
-        ref = numpy.string_(cbf_obj.header["Content-MD5"])
-        obt = fabio.cbfimage.md5sum(binary_data)
-        if ref != obt:
-            logger.error("Checksum of binary data mismatch: expected %s, got %s" % (ref, obt))
-
-    if cbf_obj.header["conversions"] == "x-CBF_BYTE_OFFSET":
-        cbf_obj.data = cbf_obj._readbinary_byte_offset(binary_data).astype(cbf_obj.bytecode).reshape(
-            (cbf_obj.dim2, cbf_obj.dim1))
-    else:
-        raise IOError("Compression scheme not yet supported, please contact the author")
-
-    cbf_obj.resetvals()
-    # ensure the PIL image is reset
-    cbf_obj.pilimage = None
-    return cbf_obj
diff --git a/cfel_geom.py b/cfel_geom.py
deleted file mode 100644
index 4a864829384625296857ffdf57cd132c3f87c93f..0000000000000000000000000000000000000000
--- a/cfel_geom.py
+++ /dev/null
@@ -1,210 +0,0 @@
-#    This file is part of cfelpyutils.
-#
-#    cfelpyutils is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    cfelpyutils is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
-"""
-Utilities for CrystFEL-style geometry files.
-
-This module contains utilities for the processing of CrystFEL-style geometry
-files.
-"""
-
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
-from collections import namedtuple
-
-import numpy
-
-from cfelpyutils.cfel_crystfel import load_crystfel_geometry
-
-PixelMaps = namedtuple('PixelMaps', ['x', 'y', 'r'])
-ImageShape = namedtuple('ImageShape', ['ss', 'fs'])
-
-
-def _find_minimum_image_shape(x, y):
-    # find the smallest size of cspad_geom that contains all
-    # xy values but is symmetric about the origin
-    n = 2 * int(max(abs(y.max()), abs(y.min()))) + 2
-    m = 2 * int(max(abs(x.max()), abs(x.min()))) + 2
-
-    return n, m
-
-
-def apply_geometry_from_file(data_as_slab, geometry_filename):
-    """Parses a geometry file and applies the geometry to data.
-
-    Parses a geometry file and applies the geometry to detector data in 'slab' format. Turns a 2d array of pixel
-    values into an array containing a representation of the physical layout of the detector, keeping the origin of
-    the reference system at the beam interaction point.
-
-    Args:
-
-        data_as_slab (numpy.ndarray): the pixel values to which geometry is to be applied.
-
-        geometry_filename (str): geometry filename.
-
-    Returns:
-
-        im_out (numpy.ndarray data_as_slab.dtype): Array containing a representation of the physical layout of the
-        detector, with the origin of the reference system at the beam interaction point.
-    """
-
-    x, y, _, img_shape = pixel_maps_for_image_view(geometry_filename)
-    im_out = numpy.zeros(img_shape, dtype=data_as_slab.dtype)
-
-    im_out[y, x] = data_as_slab.ravel()
-    return im_out
-
-
-def apply_geometry_from_pixel_maps(data_as_slab, y, x, im_out=None):
-    """Applies geometry in pixel map format to data.
-
-    Applies geometry, in the form of pixel maps, to detector data in 'slab' format. Turns a 2d array of pixel values
-    into an array containing a representation of the physical layout of the detector, keeping the origin of the
-    reference system at the beam interaction point.
-
-    Args:
-
-        data_as_slab (numpy.ndarray): the pixel values to which geometry is to be applied.
-
-        y (numpy.ndarray): the y pixel map describing the geometry of the detector
-
-        x (numpy.ndarray): the x pixel map describing the geometry of the detector
-
-        im_out (Optional[numpy.ndarray]): array to hold the output; if not provided, one will be generated
-        automatically.
-
-    Returns:
-
-        im_out (numpy.ndarray data_as_slab.dtype): Array containing a representation of the physical layout of the
-        detector, with the origin of the reference system at the beam interaction point.
-    """
-
-    if im_out is None:
-        im_out = numpy.zeros(data_as_slab.shape, dtype=data_as_slab.dtype)
-
-    im_out[y, x] = data_as_slab.ravel()
-    return im_out
-
-
-def pixel_maps_for_image_view(geometry_filename):
-    """Parses a geometry file and creates pixel maps for pyqtgraph visualization.
-
-    Parses the geometry file and creates pixel maps for an array in 'slab' format containing pixel values. The pixel
-    maps can be used to create a representation of the physical layout of the detector in a pyqtgraph ImageView
-    widget (i.e. they apply the detector geometry, placing the origin of the reference system in the top left corner
-    of the output array). The representation is centered at the point where the beam hits the detector according to
-    the geometry in the file.
-
-    Args:
-
-        geometry_filename (str): geometry filename.
-
-    Returns:
-
-        x (numpy.ndarray int): pixel map for the x coordinate.
-
-        y (numpy.ndarray int): pixel map for the y coordinate.
-    """
-
-    pixm = pixel_maps_from_geometry_file(geometry_filename)
-    x, y = pixm.x, pixm.y
-
-    n, m = _find_minimum_image_shape(x, y)
-
-    # convert y x values to i j values
-    i = numpy.array(y, dtype=int) + n // 2 - 1
-    j = numpy.array(x, dtype=int) + m // 2 - 1
-
-    y = i.flatten()
-    x = j.flatten()
-
-    return PixelMaps(x, y, None)
-
-
-def get_image_shape(geometry_filename):
-    """Parses a geometry file and returns the minimum size of an image that can represent the detector.
-
-    Parses the geometry file and return a numpy shape object representing the minimum size of an image that
-    can contain the physical representation of the detector. The representation is centered at the point where the beam
-    hits the detector according to the geometry in the file.
-
-    Args:
-
-        geometry_filename (str): geometry filename.
-
-    Returns:
-
-        img_shape tuple (int, int): shape of the array needed to contain the representation of the physical layout
-        of the detector.
-    """
-
-    pixm = pixel_maps_from_geometry_file(geometry_filename)
-    x, y = pixm.x, pixm.y
-
-    n, m = _find_minimum_image_shape(x, y)
-
-    img_shape = ImageShape(n, m)
-    return img_shape
-
-
-def pixel_maps_from_geometry_file(fnam):
-    """Parses a geometry file and creates pixel maps.
-
-    Extracts pixel maps from a CrystFEL-style geometry file. The pixel maps can be used to create a representation of
-    the physical layout of the detector, keeping the origin of the reference system at the beam interaction
-    point.
-
-    Args:
-
-        fnam (str): geometry filename.
-
-    Returns:
-
-        x, y, r (numpy.ndarray float, numpy.ndarray float, numpy.ndarray float): slab-like pixel maps containing,
-        respectively, the x coordinate of each pixel, the y coordinate of each pixel, and the distance of each pixel
-        from the center of the reference system.
-    """
-
-    detector = load_crystfel_geometry(fnam)
-
-    max_slab_fs = numpy.array([detector['panels'][k]['max_fs'] for k in detector['panels']]).max()
-    max_slab_ss = numpy.array([detector['panels'][k]['max_ss'] for k in detector['panels']]).max()
-
-    x = numpy.zeros((max_slab_ss + 1, max_slab_fs + 1), dtype=numpy.float32)
-    y = numpy.zeros((max_slab_ss + 1, max_slab_fs + 1), dtype=numpy.float32)
-
-    for p in detector['panels']:
-        # get the pixel coords for this asic
-        i, j = numpy.meshgrid(
-            numpy.arange(detector['panels'][p]['max_ss'] - detector['panels'][p]['min_ss'] + 1),
-            numpy.arange(detector['panels'][p]['max_fs'] - detector['panels'][p]['min_fs'] + 1),
-            indexing='ij'
-        )
-
-        # make the y-x ( ss, fs ) vectors, using complex notation
-        dx = detector['panels'][p]['fsy'] + 1J * detector['panels'][p]['fsx']
-        dy = detector['panels'][p]['ssy'] + 1J * detector['panels'][p]['ssx']
-        r_0 = detector['panels'][p]['cny'] + 1J * detector['panels'][p]['cnx']
-
-        r = i * dy + j * dx + r_0
-
-        y[detector['panels'][p]['min_ss']: detector['panels'][p]['max_ss'] + 1,
-          detector['panels'][p]['min_fs']: detector['panels'][p]['max_fs'] + 1] = r.real
-
-        x[detector['panels'][p]['min_ss']: detector['panels'][p]['max_ss'] + 1,
-          detector['panels'][p]['min_fs']: detector['panels'][p]['max_fs'] + 1] = r.imag
-
-    r = numpy.sqrt(numpy.square(x) + numpy.square(y))
-
-    return PixelMaps(x, y, r)
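-
-
-# A minimal usage sketch tying the pixel maps to the geometry-application
-# functions above (the geometry file path 'example.geom' is hypothetical;
-# any CrystFEL-style geometry file would do):
-#
-#     pixm = pixel_maps_from_geometry_file('example.geom')
-#     data_as_slab = numpy.random.rand(*pixm.x.shape)
-#     image = apply_geometry_from_file(data_as_slab, 'example.geom')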
diff --git a/cfel_optarg.py b/cfel_optarg.py
deleted file mode 100644
index f85245f10d9900152c96e82214c5da34b41c69dd..0000000000000000000000000000000000000000
--- a/cfel_optarg.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#    This file is part of cfelpyutils.
-#
-#    cfelpyutils is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    cfelpyutils is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
-"""
-Utilities for parsing command line options and configuration files.
-
-This module contains utilities for parsing of command line options and
-configuration files.
-"""
-
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
-import ast
-
-
-def parse_parameters(config):
-    """Sets correct types for parameter dictionaries.
-
-    Reads a parameter dictionary returned by the ConfigParser python module, and assigns correct types to parameters,
-    without changing the structure of the dictionary.
-
-    The parser tries to interpret each entry in the dictionary according to the following rules:
-
-    - If the entry starts and ends with a single quote or double quote, it is
-      interpreted as a string.
-    - If the entry starts and ends with a square bracket, it is interpreted as a list.
-    - If the entry starts and ends with a brace, it is interpreted as a dictionary.
-    - If the entry is the word None, without quotes, then the entry is
-      interpreted as NoneType.
-    - If the entry is the word False, without quotes, then the entry is
-      interpreted as a boolean False.
-    - If the entry is the word True, without quotes, then the entry is
-      interpreted as a boolean True.
-    - If none of the previous options match the content of the entry,
-      the parser tries to interpret the entry in order as:
-
-        - An integer number.
-        - A float number.
-        - A string.
-
-      The first choice that succeeds determines the entry type.
-
-    Args:
-
-        config (class RawConfigParser): ConfigParser instance.
-
-    Returns:
-
-        monitor_params (dict): dictionary with the same structure as the input dictionary, but with correct types
-        assigned to each entry.
-    """
-
-    monitor_params = {}
-
-    for sect in config.sections():
-        monitor_params[sect] = {}
-        for op in config.options(sect):
-            monitor_params[sect][op] = config.get(sect, op)
-            if monitor_params[sect][op].startswith("'") and monitor_params[sect][op].endswith("'"):
-                monitor_params[sect][op] = monitor_params[sect][op][1:-1]
-                continue
-            if monitor_params[sect][op].startswith('"') and monitor_params[sect][op].endswith('"'):
-                monitor_params[sect][op] = monitor_params[sect][op][1:-1]
-                continue
-            if monitor_params[sect][op].startswith("[") and monitor_params[sect][op].endswith("]"):
-                try:
-                    monitor_params[sect][op] = ast.literal_eval(config.get(sect, op))
-                    continue
-                except (SyntaxError, ValueError):
-                    raise RuntimeError('Error parsing parameter {0} in section [{1}]. Make sure that the syntax is '
-                                       'correct: list elements must be separated by commas and dict entries must '
-                                       'contain the colon symbol. Strings must be quoted, even in lists and '
-                                       'dicts.'.format(op, sect))
-            if monitor_params[sect][op].startswith("{") and monitor_params[sect][op].endswith("}"):
-                try:
-                    monitor_params[sect][op] = ast.literal_eval(config.get(sect, op))
-                    continue
-                except (SyntaxError, ValueError):
-                    raise RuntimeError('Error parsing parameter {0} in section [{1}]. Make sure that the syntax is '
-                                       'correct: list elements must be separated by commas and dict entries must '
-                                       'contain the colon symbol. Strings must be quoted, even in lists and '
-                                       'dicts.'.format(op, sect))
-            if monitor_params[sect][op] == 'None':
-                monitor_params[sect][op] = None
-                continue
-            if monitor_params[sect][op] == 'False':
-                monitor_params[sect][op] = False
-                continue
-            if monitor_params[sect][op] == 'True':
-                monitor_params[sect][op] = True
-                continue
-            try:
-                monitor_params[sect][op] = int(monitor_params[sect][op])
-                continue
-            except ValueError:
-                try:
-                    monitor_params[sect][op] = float(monitor_params[sect][op])
-                    continue
-                except ValueError:
-                    raise RuntimeError('Error parsing parameter {0} in section [{1}]. Make sure that the syntax is '
-                                       'correct: list elements must be separated by commas and dict entries must '
-                                       'contain the colon symbol. Strings must be quoted, even in lists and '
-                                       'dicts.'.format(op, sect))
-    return monitor_params
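-
-
-# A minimal usage sketch of parse_parameters (the section and option names
-# below are hypothetical):
-#
-#     from configparser import ConfigParser  # Python 3
-#
-#     config = ConfigParser()
-#     config.read_string('[General]\nnum_events = 100\nlabel = "run1"\nthreshold = 1.5\nmask = None\n')
-#     params = parse_parameters(config)
-#     # params == {'General': {'num_events': 100, 'label': 'run1',
-#     #                        'threshold': 1.5, 'mask': None}}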
diff --git a/cfel_psana.py b/cfel_psana.py
deleted file mode 100644
index 736db618d62cd65b053e29177f5cd77ea9d8d774..0000000000000000000000000000000000000000
--- a/cfel_psana.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#    This file is part of cfelpyutils.
-#
-#    cfelpyutils is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    cfelpyutils is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
-"""
-Utilities based on the psana python module.
-
-This module provides utilities that build on the functionality provided by the psana python module.
-"""
-
-from __future__ import (absolute_import, division, print_function,
-                        unicode_literals)
-
-
-def psana_obj_from_string(name):
-    """Converts a string into a psana object type.
-
-    Takes a string and returns the python object type described by the string.
-
-    Args:
-
-        name (str): a string describing a python type.
-
-    Returns:
-
-        mod (type): the python type described by the string.
-    """
-
-    components = name.split('.')
-    mod = __import__(components[0])
-    for comp in components[1:]:
-        mod = getattr(mod, comp)
-    return mod
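-
-
-# A minimal usage sketch (using a standard-library type, so it also works
-# where psana itself is not installed):
-#
-#     cls = psana_obj_from_string('collections.OrderedDict')
-#     # cls is collections.OrderedDict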
-
-
-def psana_event_inspection(source):
-    """Prints the structure of psana events.
-
-    Takes a psana source string (e.g. exp=CXI/cxix....) and inspects the structure of the first event in the data,
-    printing information about the content of the event.
-
-    Args:
-
-        source (str): a psana source string (e.g. exp=CXI/cxix....).
-    """
-
-    import psana
-
-    print('\n\n')
-    print('data source :', source)
-
-    print('\n')
-    print('Opening dataset...')
-    ds = psana.DataSource(source)
-
-    print('\n')
-    print('Epics pv Names (the confusing ones):')
-    print(ds.env().epicsStore().pvNames())
-
-    print('\n')
-    print('Epics aliases (the not so confusing ones):')
-    print(ds.env().epicsStore().aliases())
-
-    print('\n')
-    print('Event structure:')
-    itr = ds.events()
-    evt = next(itr)
-    for k in evt.keys():
-        print('Type: {0}   Source: {1}   Alias: {2}   Key: {3}'.format(k.type(), k.src(), k.alias(), k.key()))
-        print('')
-
-    for k in evt.keys():
-        print(k)
-
-    beam = evt.get(psana.Bld.BldDataEBeamV7, psana.Source('BldInfo(EBeam)'))
-    print('Photon energy: %f' % beam.ebeamPhotonEnergy())
-
-
-def dirname_from_source_runs(source):
-    """Returns a directory name based on a psana source string.
-
-    Takes a psana source string (e.g. exp=CXI/cxix....) and returns a string that can be used as a subdirectory name or
-    a prefix for files and directories.
-
-    Args:
-
-        source (str): a psana source string (e.g. exp=CXI/cxi....).
-
-    Returns:
-
-        dirname (str): a string that can be used as a filename or a prefix.
-    """
-
-    start = source.find('run=') + 4
-    stop = source.find(':idx')
-    if stop == -1:
-        stop = len(source)
-    runs = source[start:stop]
-    nums = runs.split(',')
-    if not nums:
-        nums = runs
-    dirname = 'run_' + '_'.join(nums)
-    return dirname
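-
-
-# A minimal usage sketch (the experiment and run numbers are hypothetical):
-#
-#     dirname_from_source_runs('exp=cxi12345:run=14,16:idx')  # -> 'run_14_16'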
diff --git a/cfel_vtk.py b/cfel_vtk.py
deleted file mode 100644
index a2726e203646c8d3b60556871aee9898aa5d5842..0000000000000000000000000000000000000000
--- a/cfel_vtk.py
+++ /dev/null
@@ -1,616 +0,0 @@
-#    This file is part of cfelpyutils.
-#
-#    cfelpyutils is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, either version 3 of the License, or
-#    (at your option) any later version.
-#
-#    cfelpyutils is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
-"""
-Utilities for 3D data visualization using the Visualization Toolkit (VTK).
-"""
-
-
-import numpy
-
-import vtk
-
-
-VTK_VERSION = vtk.vtkVersion().GetVTKMajorVersion()
-
-
-def get_lookup_table(minimum_value, maximum_value, log=False, colorscale="jet", number_of_colors=1000):
-    """Create a vtkLookupTable with a specified range, and colorscale.
-
-    Args:
-
-        minimum_value (float): Lowest value the lookup table can display, lower values will be displayed as this value
-
-        maximum_value (float): Highest value the lookup table can display, higher values will be displayed as this value
-
-        log (Optional[bool]): True if the scale is logarithmic
-
-        colorscale (Optional[string]): Accepts the name of any matplotlib colorscale. The lookuptable will
-            replicate this scale.
-
-        number_of_colors (Optional[int]): The length of the table. Higher values correspond to a smoother color scale.
-
-    Returns:
-
-        lookup_table (vtk.vtkLookupTable): A vtk lookup table
-    """
-
-    import matplotlib
-    import matplotlib.cm
-    if log:
-        lut = vtk.vtkLogLookupTable()
-    else:
-        lut = vtk.vtkLookupTable()
-    lut.SetTableRange(minimum_value, maximum_value)
-    lut.SetNumberOfColors(number_of_colors)
-    lut.Build()
-    for i in range(number_of_colors):
-        color = matplotlib.cm.get_cmap(colorscale)(float(i) / float(number_of_colors))
-        lut.SetTableValue(i, color[0], color[1], color[2], 1.)
-    lut.SetUseBelowRangeColor(True)
-    lut.SetUseAboveRangeColor(True)
-    return lut
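-
-
-# A minimal usage sketch, assuming vtk and matplotlib are installed:
-#
-#     lut = get_lookup_table(0.0, 1.0, log=False, colorscale='jet')
-#     # The resulting table can be passed e.g. to poly_data_to_actor() below.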
-
-
-def array_to_float_array(array_in, dtype=None):
-    """Convert a numpy array into a vtkFloatArray of vtkDoubleArray, depending on the type of the input.
-    This flattens the array and thus the shape is lost.
-
-    Args:
-
-        array_in (numpy.ndarray): The array to convert.
-
-        dtype (Optional[type]): Optionally convert the array to the specified data type. Otherwise the original
-        type will be preserved.
-
-    Returns:
-
-        float_array (vtk.vtkFloatArray): A float array of the specified type.
-    """
-
-    if dtype is None:
-        dtype = array_in.dtype
-    if dtype == "float32":
-        float_array = vtk.vtkFloatArray()
-    elif dtype == "float64":
-        float_array = vtk.vtkDoubleArray()
-    else:
-        raise ValueError("Wrong format of input array, must be float32 or float64")
-    if len(array_in.shape) == 2:
-        float_array.SetNumberOfComponents(array_in.shape[1])
-    elif len(array_in.shape) == 1:
-        float_array.SetNumberOfComponents(1)
-    else:
-        raise ValueError("Wrong shape of array must be 1D or 2D.")
-    float_array.SetVoidArray(numpy.ascontiguousarray(array_in, dtype), numpy.prod(array_in.shape), 1)
-    return float_array
-
-
-def array_to_vtk(array_in, dtype=None):
-    """Convert a numpy array into a vtk array of the specified type. This flattens the array and thus the shape is lost.
-
-    Args:
-
-        array_in (numpy.ndarray): The array to convert.
-
-        dtype (Optional[type]): Optionally convert the array to the specified data type. Otherwise the original type
-        will be preserved.
-
-    Returns:
-
-        vtk_array (vtk.vtkDataArray): A vtk array of the specified type.
-    """
-
-    if dtype is None:
-        dtype = numpy.dtype(array_in.dtype)
-    else:
-        dtype = numpy.dtype(dtype)
-    if dtype == numpy.float32:
-        vtk_array = vtk.vtkFloatArray()
-    elif dtype == numpy.float64:
-        vtk_array = vtk.vtkDoubleArray()
-    elif dtype == numpy.uint8:
-        vtk_array = vtk.vtkUnsignedCharArray()
-    elif dtype == numpy.int8:
-        vtk_array = vtk.vtkCharArray()
-    else:
-        raise ValueError("Wrong format of input array, must be float32 or float64")
-    if len(array_in.shape) != 1 and len(array_in.shape) != 2:
-        raise ValueError("Wrong shape: array must be 1D")
-    vtk_array.SetNumberOfComponents(1)
-    vtk_array.SetVoidArray(numpy.ascontiguousarray(array_in.flatten(), dtype), numpy.prod(array_in.shape), 1)
-    return vtk_array
-
-
-def array_to_image_data(array_in, dtype=None):
-    """Convert a numpy array to vtkImageData. Image data is a 3D object, thus the input must be 3D.
-
-    Args:
-
-        array_in (numpy.ndarray): Array to convert to vtkImageData. Must be 3D.
-
-        dtype (Optional[type]): Optionally convert the array to the specified data type. Otherwise the original
-        type will be preserved.
-
-    Returns:
-
-        image_data (vtk.vtkImageData): Image data containing the data from the array.
-    """
-
-    if len(array_in.shape) != 3:
-        raise ValueError("Array must be 3D for conversion to vtkImageData")
-    array_flat = array_in.flatten()
-    float_array = array_to_float_array(array_flat, dtype)
-    image_data = vtk.vtkImageData()
-    image_data.SetDimensions(*array_in.shape)
-    image_data.GetPointData().SetScalars(float_array)
-    return image_data
-
-
-def window_to_png(render_window, file_name, magnification=1):
-    """Take a screen shot of a specific vt render window and save it to file.
-
-    Args:
-
-        render_window (vtk.vtkRenderWindow): The render window to capture.
-
-        file_name (string): A png file with this name will be created from the provided window.
-
-        magnification (Optional[int]): Increase the resolution of the output file by this factor
-    """
-
-    magnification = int(magnification)
-    window_to_image_filter = vtk.vtkWindowToImageFilter()
-    window_to_image_filter.SetInput(render_window)
-    window_to_image_filter.SetMagnification(magnification)
-    window_to_image_filter.SetInputBufferTypeToRGBA()
-    window_to_image_filter.Update()
-
-    writer = vtk.vtkPNGWriter()
-    writer.SetFileName(file_name)
-    writer.SetInputConnection(window_to_image_filter.GetOutputPort())
-    writer.Write()
-
-
-def poly_data_to_actor(poly_data, lut):
-    """Create a vtkActor from a vtkPolyData. This circumvents the need to create a vtkMapper by internally
-    using a very basic vtkMapper.
-
-    Args:
-
-        poly_data (vtk.vtkPolyData): vtkPolyData object.
-
-        lut (vtk.vtkLookupTable): The vtkLookupTable specifies the colorscale to use for the mapper.
-
-    Returns:
-
-        actor (vtk.vtkActor): Actor to display the provided vtkPolyData
-    """
-
-    mapper = vtk.vtkPolyDataMapper()
-    mapper.SetInputData(poly_data)
-    mapper.SetLookupTable(lut)
-    mapper.SetUseLookupTableScalarRange(True)
-    actor = vtk.vtkActor()
-    actor.SetMapper(mapper)
-    return actor
-
-
-class IsoSurface(object):
-    """Create and plot isosurfaces.
-
-    Args:
-
-        volume (numpy.ndarray): 3D floating point array.
-
-        level (float or list of float): The threshold level for the isosurface, or a list of such levels.
-    """
-
-    def __init__(self, volume, level=None):
-        self._surface_algorithm = None
-        self._renderer = None
-        self._actor = None
-        self._mapper = None
-        self._volume_array = None
-
-        self._float_array = vtk.vtkFloatArray()
-        self._image_data = vtk.vtkImageData()
-        self._image_data.GetPointData().SetScalars(self._float_array)
-        self._setup_data(volume)
-
-        self._surface_algorithm = vtk.vtkMarchingCubes()
-        self._surface_algorithm.SetInputData(self._image_data)
-        self._surface_algorithm.ComputeNormalsOn()
-
-        if level is not None:
-            try:
-                self.set_multiple_levels(iter(level))
-            except TypeError:
-                self.set_level(0, level)
-
-        self._mapper = vtk.vtkPolyDataMapper()
-        self._mapper.SetInputConnection(self._surface_algorithm.GetOutputPort())
-        self._mapper.ScalarVisibilityOn()
-        self._actor = vtk.vtkActor()
-        self._actor.SetMapper(self._mapper)
-
-    def _setup_data(self, volume):
-        """Create the numpy array self._volume_array and vtk array self._float_array and make them share data.
-
-        Args:
-
-            volume (numpy.ndarray): This data will populate both the created numpy and vtk objects.
-        """
-
-        self._volume_array = numpy.zeros(volume.shape, dtype="float32", order="C")
-        self._volume_array[:] = volume
-        self._float_array.SetNumberOfValues(numpy.prod(volume.shape))
-        self._float_array.SetNumberOfComponents(1)
-        self._float_array.SetVoidArray(self._volume_array, numpy.prod(volume.shape), 1)
-        self._image_data.SetDimensions(*self._volume_array.shape)
-
-    def set_renderer(self, renderer):
-        """Set the renderer of the isosurface and remove any existing renderer.
-
-        Args:
-
-            renderer (vtk.vtkRenderer): Give this renderer control over all the surface actors.
-        """
-
-        if self._actor is None:
-            raise RuntimeError("Actor does not exist.")
-        if self._renderer is not None:
-            self._renderer.RemoveActor(self._actor)
-        self._renderer = renderer
-        self._renderer.AddActor(self._actor)
-
-    def set_multiple_levels(self, levels):
-        """Remova any current surface levels and add the ones from the provided list.
-
-        Args:
-
-            levels (list of float): Levels for the isosurface, in absolute values (not e.g. ratios)
-        """
-
-        self._surface_algorithm.SetNumberOfContours(0)
-        for index, this_level in enumerate(levels):
-            self._surface_algorithm.SetValue(index, this_level)
-        self._render()
-
-    def get_levels(self):
-        """Return a list of the current surface levels.
-
-        Returns:
-
-            levels (list of floats): The current surface levels.
-        """
-
-        return [self._surface_algorithm.GetValue(index)
-                for index in range(self._surface_algorithm.GetNumberOfContours())]
-
-    def add_level(self, level):
-        """Add a single surface level.
-
-        Args:
-
-            level (float): The level of the new surface.
-        """
-
-        self._surface_algorithm.SetValue(self._surface_algorithm.GetNumberOfContours(), level)
-        self._render()
-
-    def remove_level(self, index):
-        """Remove a singel surface level at the provided index.
-
-        Args:
-
-            index (int): The index of the level. If levels were added one by one this corresponds
-            to the order in which they were added.
-        """
-
-        for idx in range(index, self._surface_algorithm.GetNumberOfContours()-1):
-            self._surface_algorithm.SetValue(idx, self._surface_algorithm.GetValue(idx+1))
-        self._surface_algorithm.SetNumberOfContours(self._surface_algorithm.GetNumberOfContours()-1)
-        self._render()
-
-    def set_level(self, index, level):
-        """Change the value of an existing surface level.
-
-        Args:
-            index (int): The index of the level to change. If levels were added one by one this corresponds to
-                the order in which they were added.
-            level (float): The new level of the surface.
-        """
-
-        self._surface_algorithm.SetValue(index, level)
-        self._render()
-
-    def set_cmap(self, cmap):
-        """Set the colormap. The color is a function of surface level and mainly of relevance when plotting multiple
-        surfaces.
-
-        Args:
-
-            cmap (string): Name of the colormap to use. Supports all colormaps provided by matplotlib.
-        """
-
-        self._mapper.ScalarVisibilityOn()
-        self._mapper.SetLookupTable(get_lookup_table(self._volume_array.min(), self._volume_array.max(),
-                                                     colorscale=cmap))
-        self._render()
-
-    def set_color(self, color):
-        """Plot all surfaces in this provided color.
-
-        Args:
-
-            color (length 3 iterable): The RGB value of the color.
-        """
-
-        self._mapper.ScalarVisibilityOff()
-        self._actor.GetProperty().SetColor(color[0], color[1], color[2])
-        self._render()
-
-    def set_opacity(self, opacity):
-        """Set the opacity of all surfaces. (seting it individually for each surface is not supported)
-
-        Args:
-
-            opacity (float): Value between 0. and 1. where 0. is completely transparent and 1. is completely opaque.
-        """
-
-        self._actor.GetProperty().SetOpacity(opacity)
-        self._render()
-
-    def _render(self):
-        """Render if a renderer is set, otherwise do nothing."""
-
-        if self._renderer is not None:
-            self._renderer.GetRenderWindow().Render()
-
-    def set_data(self, volume):
-        """Change the data displayed.
-
-        Args:
-
-            volume (numpy.ndarray): The new array. Must have the same shape as the old array."""
-
-        if volume.shape != self._volume_array.shape:
-            raise ValueError("New volume must be the same shape as the old one")
-        self._volume_array[:] = volume
-        self._float_array.Modified()
-        self._render()
-
-
-def plot_isosurface(volume, level=None, opacity=1.):
-    """Plot isosurfaces of the provided module.
-
-    Args:
-
-        volume (numpy.ndarray): The 3D numpy array that will be plotted.
-
-        level (float or list of floats): Levels can be an iterable or a single value.
-
-        opacity (float): Float between 0. and 1. where 0. is completely transparent and 1. is completely opaque.
-    """
-
-    surface_object = IsoSurface(volume, level)
-    surface_object.set_opacity(opacity)
-
-    renderer = vtk.vtkRenderer()
-    if opacity != 1.:
-        renderer.SetUseDepthPeeling(True)
-    render_window = vtk.vtkRenderWindow()
-    render_window.AddRenderer(renderer)
-    interactor = vtk.vtkRenderWindowInteractor()
-    interactor.SetRenderWindow(render_window)
-    interactor.SetInteractorStyle(vtk.vtkInteractorStyleRubberBandPick())
-
-    surface_object.set_renderer(renderer)
-
-    renderer.SetBackground(0., 0., 0.)
-    render_window.SetSize(800, 800)
-    interactor.Initialize()
-    render_window.Render()
-    interactor.Start()
-
-
-def plot_planes(array_in, log=False, cmap=None):
-    """Plot the volume at two interactive planes that cut the volume.
-
-    Args:
-
-        array_in (numpy.ndarray): Input array must be 3D.
-
-        log (bool): If true the data will be plotted in logarithmic scale.
-
-        cmap (string): Name of the colormap to use. Supports all colormaps provided by matplotlib.
-    """
-
-    array_in = numpy.float64(array_in)
-    renderer = vtk.vtkRenderer()
-    render_window = vtk.vtkRenderWindow()
-    render_window.AddRenderer(renderer)
-    interactor = vtk.vtkRenderWindowInteractor()
-    interactor.SetRenderWindow(render_window)
-    interactor.SetInteractorStyle(vtk.vtkInteractorStyleRubberBandPick())
-
-    if cmap is None:
-        import matplotlib as _matplotlib
-        cmap = _matplotlib.rcParams["image.cmap"]
-    lut = get_lookup_table(max(0., array_in.min()), array_in.max(), log=log, colorscale=cmap)
-    picker = vtk.vtkCellPicker()
-    picker.SetTolerance(0.005)
-
-    image_data = array_to_image_data(array_in)
-
-    def setup_plane():
-        """Create and setup a singel plane."""
-        plane = vtk.vtkImagePlaneWidget()
-        if VTK_VERSION >= 6:
-            plane.SetInputData(image_data)
-        else:
-            plane.SetInput(image_data)
-        plane.UserControlledLookupTableOn()
-        plane.SetLookupTable(lut)
-        plane.DisplayTextOn()
-        plane.SetPicker(picker)
-        plane.SetLeftButtonAction(1)
-        plane.SetMiddleButtonAction(2)
-        plane.SetRightButtonAction(0)
-        plane.SetInteractor(interactor)
-        return plane
-
-    plane_1 = setup_plane()
-    plane_1.SetPlaneOrientationToXAxes()
-    plane_1.SetSliceIndex(array_in.shape[0]//2)
-    plane_1.SetEnabled(1)
-    plane_2 = setup_plane()
-    plane_2.SetPlaneOrientationToYAxes()
-    plane_2.SetSliceIndex(array_in.shape[1]//2)
-    plane_2.SetEnabled(1)
-
-    renderer.SetBackground(0., 0., 0.)
-    render_window.SetSize(800, 800)
-    interactor.Initialize()
-    render_window.Render()
-    interactor.Start()
-
-
-def setup_window(size=(400, 400), background=(1., 1., 1.)):
-    """Create a renderer, render_window and interactor and setup connections between them.
-
-    Args:
-
-        size (Optional[length 2 iterable of int]): The size of the window in pixels.
-
-        background (Optional[length 3 iterable of float]): RGB value of the background color.
-
-    Returns:
-
-        renderer (vtk.vtkRenderer): A standard renderer connected to the window.
-
-        render_window (vtk.vtkRenderWindow): With dimensions given in the arguments, or otherwise 400x400 pixels.
-
-        interactor (vtk.vtkRenderWindowInteractor): The interactor will be given the rubber band pick interactor style.
-    """
-
-    renderer = vtk.vtkRenderer()
-    render_window = vtk.vtkRenderWindow()
-    render_window.AddRenderer(renderer)
-    interactor = vtk.vtkRenderWindowInteractor()
-    interactor.SetInteractorStyle(vtk.vtkInteractorStyleRubberBandPick())
-    interactor.SetRenderWindow(render_window)
-
-    renderer.SetBackground(background[0], background[1], background[2])
-    render_window.SetSize(size[0], size[1])
-
-    interactor.Initialize()
-    render_window.Render()
-    return renderer, render_window, interactor
-
-
-def scatterplot_3d(data, color=None, point_size=None, point_shape=None):
-    """3D scatter plot.
-
-    Args:
-
-        data (numpy.ndarray): The array must have shape Nx3 where N is the number of points.
-
-        color (Optional[numpy.ndarray]): 1D array of floats with the same length as the data array.
-        These numbers give the color of each point.
-
-        point_size (Optional[float]): The size of each point. Behaves differently depending on the point_shape.
-        If shape is spheres the size is relative to the scene and if squares the size is relative to the window.
-
-        point_shape (Optional["spheres" or "squares"]): "spheres" plots each point as a sphere, recommended for
-        small data sets. "squares" plots each point as a square without any 3D structure, recommended for
-        large data sets.
-    """
-
-    if len(data.shape) != 2 or data.shape[1] != 3:
-        raise ValueError("data must have shape (n, 3) where n is the number of points.")
-    if point_shape is None:
-        if len(data) <= 1000:
-            point_shape = "spheres"
-        else:
-            point_shape = "squares"
-    data = numpy.float32(data)
-    data_vtk = array_to_float_array(data)
-    point_data = vtk.vtkPoints()
-    point_data.SetData(data_vtk)
-    points_poly_data = vtk.vtkPolyData()
-    points_poly_data.SetPoints(point_data)
-
-    if color is not None:
-        lut = get_lookup_table(color.min(), color.max())
-        color_scalars = array_to_vtk(numpy.float32(color.copy()))
-        color_scalars.SetLookupTable(lut)
-        points_poly_data.GetPointData().SetScalars(color_scalars)
-
-    if point_shape == "spheres":
-        if point_size is None:
-            point_size = numpy.array(data).std() / len(data)**(1./3.) / 3.
-        glyph_filter = vtk.vtkGlyph3D()
-        glyph_filter.SetInputData(points_poly_data)
-        sphere_source = vtk.vtkSphereSource()
-        sphere_source.SetRadius(point_size)
-        glyph_filter.SetSourceConnection(sphere_source.GetOutputPort())
-        glyph_filter.SetScaleModeToDataScalingOff()
-        if color is not None:
-            glyph_filter.SetColorModeToColorByScalar()
-        else:
-            glyph_filter.SetColorMode(0)
-        glyph_filter.Update()
-    elif point_shape == "squares":
-        if point_size is None:
-            point_size = 3
-        glyph_filter = vtk.vtkVertexGlyphFilter()
-        glyph_filter.SetInputData(points_poly_data)
-        glyph_filter.Update()
-    else:
-        raise ValueError("{0} is not a valid entry for points".format(point_shape))
-
-    poly_data = vtk.vtkPolyData()
-    poly_data.ShallowCopy(glyph_filter.GetOutput())
-
-    renderer, render_window, interactor = setup_window()
-
-    mapper = vtk.vtkPolyDataMapper()
-    mapper.SetInputData(poly_data)
-    if color is not None:
-        mapper.SetLookupTable(lut)
-        mapper.SetUseLookupTableScalarRange(True)
-
-    points_actor = vtk.vtkActor()
-    points_actor.SetMapper(mapper)
-    points_actor.GetProperty().SetPointSize(point_size)
-    points_actor.GetProperty().SetColor(0., 0., 0.)
-
-    axes_actor = vtk.vtkCubeAxesActor()
-    axes_actor.SetBounds(points_actor.GetBounds())
-    axes_actor.SetCamera(renderer.GetActiveCamera())
-    axes_actor.SetFlyModeToStaticTriad()
-    axes_actor.GetXAxesLinesProperty().SetColor(0., 0., 0.)
-    axes_actor.GetYAxesLinesProperty().SetColor(0., 0., 0.)
-    axes_actor.GetZAxesLinesProperty().SetColor(0., 0., 0.)
-    for i in range(3):
-        axes_actor.GetLabelTextProperty(i).SetColor(0., 0., 0.)
-        axes_actor.GetTitleTextProperty(i).SetColor(0., 0., 0.)
-
-    renderer.AddActor(points_actor)
-    renderer.AddActor(axes_actor)
-
-    render_window.Render()
-    interactor.Start()
diff --git a/cfel_crystfel.py b/crystfel_utils.py
similarity index 56%
rename from cfel_crystfel.py
rename to crystfel_utils.py
index 7c4a07e1bfb42d30a1c0567efe15fc7f48e089fd..b848ed255892ab26fe48471a13d95ba6532b6e47 100644
--- a/cfel_crystfel.py
+++ b/crystfel_utils.py
@@ -21,23 +21,39 @@ This module contains reimplementation of Crystfel functions and utilities.
 from __future__ import (absolute_import, division, print_function,
                         unicode_literals)
 
+import collections
+import copy
+import math
 import re
-from collections import OrderedDict
-from copy import deepcopy   
-from math import sqrt
 
 
 def _assplode_algebraic(value):
-    items = [item for item in re.split('([+-])', value.strip()) if item != '']
+    # Reimplementation of assplode_algegraic from
+    # libcrystfel/src/detector.c.
+
+    items = [
+        item for item in re.split(
+            pattern='([+-])', string=value.strip()
+        ) if item != ''
+    ]
 
     if items and items[0] not in ('+', '-'):
-        items.insert(0, '+')
+        items.insert(0, '+')
 
-    return [''.join((items[x], items[x + 1])) for x in range(0, len(items), 2)]
+    return [
+        ''.join((items[x], items[x + 1]))
+        for x in range(0, len(items), 2)
+    ]
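+
+# Illustrative behaviour of _assplode_algebraic (sketch, not part of the
+# original CrystFEL code): splitting an algebraic direction string into
+# signed terms, e.g.:
+#
+#     _assplode_algebraic('1.0x-0.5y')  ->  ['+1.0x', '-0.5y']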
 
 
 def _dir_conv(direction_x, direction_y, direction_z, value):
-    direction = [direction_x, direction_y, direction_z]
+    # Reimplementation of dir_conv from
+    # libcrystfel/src/detector.c.
+
+    direction = [
+        direction_x, direction_y, direction_z
+    ]
 
     items = _assplode_algebraic(value)
 
@@ -48,7 +64,9 @@ def _dir_conv(direction_x, direction_y, direction_z, value):
         axis = item[-1]
 
         if axis != 'x' and axis != 'y' and axis != 'z':
-            raise RuntimeError('Invalid Symbol: {} (must be x, y or z).'.format(axis))
+            raise RuntimeError(
+                'Invalid Symbol: {} (must be x, y or z).'.format(axis)
+            )
 
         if item[:-1] == '+':
             value = '1.0'
@@ -68,6 +86,9 @@ def _dir_conv(direction_x, direction_y, direction_z, value):
 
 
 def _set_dim_structure_entry(key, value, panel):
+    # Reimplementation of set_dim_structure_entry from
+    # libcrystfel/src/events.c.
+
     if panel['dim_structure'] is not None:
         dim = panel['dim_structure']
     else:
@@ -76,7 +97,7 @@ def _set_dim_structure_entry(key, value, panel):
     dim_index = int(key[3])
 
     if dim_index > len(dim) - 1:
-        for _ in range(len(dim), dim_index + 1):
+        for _ in range(len(dim), dim_index + 1):
             dim.append(None)
 
     if value == 'ss' or value == 'fs' or value == '%':
@@ -88,6 +109,9 @@ def _set_dim_structure_entry(key, value, panel):
 
 
 def _parse_field_for_panel(key, value, panel):
+    # Reimplementation of parse_field_for_panel from
+    # libcrystfel/src/detector.c.
+
     if key == 'min_fs':
         panel['origin_min_fs'] = int(value)
         panel['min_fs'] = int(value)
@@ -112,12 +136,14 @@ def _parse_field_for_panel(key, value, panel):
 
     elif key == 'rail_direction':
         try:
-            panel['rail_x'], panel['rail_y'], panel['rail_z'] = _dir_conv(panel['rail_x'],
-                                                                          panel['rail_y'],
-                                                                          panel['rail_z'],
-                                                                          value)
-        except RuntimeError as e:
-            raise RuntimeError('Invalid rail direction. ', e)
+            panel['rail_x'], panel['rail_y'], panel['rail_z'] = _dir_conv(
+                direction_x=panel['rail_x'],
+                direction_y=panel['rail_y'],
+                direction_z=panel['rail_z'],
+                value=value
+            )
+        except RuntimeError as exc:
+            raise RuntimeError('Invalid rail direction. ', exc)
 
     elif key == 'clen_for_centering':
         panel['clen_for_centering'] = float(value)
@@ -188,39 +214,54 @@ def _parse_field_for_panel(key, value, panel):
 
     elif key == 'fs':
         try:
-            panel['fsx'], panel['fsy'], panel['fsz'] = _dir_conv(panel['fsx'], panel['fsy'],
-                                                                 panel['fsz'], value)
+            panel['fsx'], panel['fsy'], panel['fsz'] = _dir_conv(
+                direction_x=panel['fsx'],
+                direction_y=panel['fsy'],
+                direction_z=panel['fsz'],
+                value=value
+            )
 
-        except RuntimeError as e:
-            raise RuntimeError('Invalid fast scan direction. ', e)
+        except RuntimeError as exc:
+            raise RuntimeError('Invalid fast scan direction. ', exc)
 
     elif key == 'ss':
         try:
-            panel['ssx'], panel['ssy'], panel['ssz'] = _dir_conv(panel['ssx'], panel['ssy'],
-                                                                 panel['ssz'], value)
+            panel['ssx'], panel['ssy'], panel['ssz'] = _dir_conv(
+                direction_x=panel['ssx'],
+                direction_y=panel['ssy'],
+                direction_z=panel['ssz'],
+                value=value
+            )
 
-        except RuntimeError as e:
-            raise RuntimeError('Invalid slow scan direction. ', e)
+        except RuntimeError as exc:
+            raise RuntimeError('Invalid slow scan direction. ', exc)
 
     elif key.startswith('dim'):
-        _set_dim_structure_entry(key, value, panel)
+        _set_dim_structure_entry(
+            key=key,
+            value=value,
+            panel=panel
+        )
 
     else:
         raise RuntimeError('Unrecognised field: {}'.format(key))
 
 
-def _parse_top_level(key, value, detector, beam, panel):
+def _parse_toplevel(key, value, detector, beam, panel):
+    # Reimplementation of parse_toplevel from
+    # libcrystfel/src/detector.c.
+
     if key == 'mask_bad':
         try:
             detector['mask_bad'] = int(value)
         except ValueError:
-            detector['mask_bad'] = int(value, 16)
+            detector['mask_bad'] = int(value, base=16)
 
     elif key == 'mask_good':
         try:
             detector['mask_good'] = int(value)
         except ValueError:
-            detector['mask_good'] = int(value, 16)
+            detector['mask_good'] = int(value, base=16)
 
     elif key == 'coffset':
         panel['coffset'] = float(value)
@@ -239,17 +280,26 @@ def _parse_top_level(key, value, detector, beam, panel):
     elif key == 'peak_info_location':
         detector['peak_info_location'] = value
 
-    elif key.startswith('rigid_group') and not key.startswith('rigid_group_collection'):
+    elif (
+            key.startswith('rigid_group') and not
+            key.startswith('rigid_group_collection')
+    ):
         detector['rigid_groups'][key[12:]] = value.split(',')
 
     elif key.startswith('rigid_group_collection'):
         detector['rigid_group_collections'][key[23:]] = value.split(',')
 
     else:
-        _parse_field_for_panel(key, value, panel)
+        _parse_field_for_panel(
+            key=key,
+            value=value,
+            panel=panel
+        )
 
 
 def _check_bad_fsss(bad_region, is_fsss):
+    # Reimplementation of check_bad_fsss from
+    # libcrystfel/src/detector.c.
     if bad_region['is_fsss'] == 99:
         bad_region['is_fsss'] = is_fsss
         return
@@ -261,30 +311,32 @@ def _check_bad_fsss(bad_region, is_fsss):
 
 
 def _parse_field_bad(key, value, bad):
+    # Reimplementation of parse_field_bad from
+    # libcrystfel/src/detector.c.
     if key == 'min_x':
-        bad['min_x'] = float(value)
-        _check_bad_fsss(bad, False)
+        bad['min_x'] = float(value)
+        _check_bad_fsss(bad_region=bad, is_fsss=False)
     elif key == 'max_x':
-        bad['max_x'] = float(value)
-        _check_bad_fsss(bad, False)
+        bad['max_x'] = float(value)
+        _check_bad_fsss(bad_region=bad, is_fsss=False)
     elif key == 'min_y':
-        bad['min_y'] = float(value)
-        _check_bad_fsss(bad, False)
+        bad['min_y'] = float(value)
+        _check_bad_fsss(bad_region=bad, is_fsss=False)
     elif key == 'max_y':
-        bad['max_y'] = float(value)
-        _check_bad_fsss(bad, False)
+        bad['max_y'] = float(value)
+        _check_bad_fsss(bad_region=bad, is_fsss=False)
     elif key == 'min_fs':
-        bad['min_fs'] = int(value)
-        _check_bad_fsss(bad, True)
+        bad['min_fs'] = int(value)
+        _check_bad_fsss(bad_region=bad, is_fsss=True)
     elif key == 'max_fs':
-        bad['max_fs'] = int(value)
-        _check_bad_fsss(bad, True)
+        bad['max_fs'] = int(value)
+        _check_bad_fsss(bad_region=bad, is_fsss=True)
     elif key == 'min_ss':
-        bad['min_ss'] = int(value)
-        _check_bad_fsss(bad, True)
+        bad['min_ss'] = int(value)
+        _check_bad_fsss(bad_region=bad, is_fsss=True)
     elif key == 'max_ss':
-        bad['max_ss'] = int(value)
-        _check_bad_fsss(bad, True)
+        bad['max_ss'] = int(value)
+        _check_bad_fsss(bad_region=bad, is_fsss=True)
     elif key == 'panel':
         bad['panel'] = value
     else:
@@ -293,59 +345,106 @@ def _parse_field_bad(key, value, bad):
     return
 
 
-def _check_point(name, panel, fs, ss, min_d, max_d, detector):
-    xs = fs * panel['fsx'] + ss * panel['ssx']
-    ys = fs * panel['fsy'] + ss * panel['ssy']
+def _check_point(name, panel, fs_, ss_, min_d, max_d, detector):
+    # Reimplementation of check_point from
+    # libcrystfel/src/detector.c.
 
-    rx = (xs + panel['cnx']) / panel['res']
-    ry = (ys + panel['cny']) / panel['res']
+    xs_ = fs_ * panel['fsx'] + ss_ * panel['ssx']
+    ys_ = fs_ * panel['fsy'] + ss_ * panel['ssy']
 
-    dist = sqrt(rx * rx + ry * ry)
+    rx_ = (xs_ + panel['cnx']) / panel['res']
+    ry_ = (ys_ + panel['cny']) / panel['res']
+
+    dist = math.sqrt(rx_ * rx_ + ry_ * ry_)
 
     if dist > max_d:
         detector['furthest_out_panel'] = name
-        detector['furthest_out_fs'] = fs
-        detector['furthest_out_ss'] = ss
+        detector['furthest_out_fs'] = fs_
+        detector['furthest_out_ss'] = ss_
         max_d = dist
     elif dist < min_d:
         detector['furthest_in_panel'] = name
-        detector['furthest_in_fs'] = fs
-        detector['furthest_in_ss'] = ss
+        detector['furthest_in_fs'] = fs_
+        detector['furthest_in_ss'] = ss_
         min_d = dist
 
     return min_d, max_d
 
 
 def _find_min_max_d(detector):
+    # Reimplementation of find_min_max_d from
+    # libcrystfel/src/detector.c.
+
     min_d = float('inf')
     max_d = 0.0
 
     for name, panel in detector['panels'].items():
-        min_d, max_d = _check_point(name, panel, 0, 0, min_d, max_d, detector)
-        min_d, max_d = _check_point(name, panel, panel['w'], 0, min_d, max_d, detector)
-        min_d, max_d = _check_point(name, panel, 0, panel['h'], min_d, max_d, detector)
-        min_d, max_d = _check_point(name, panel, panel['w'], panel['h'], min_d, max_d, detector)
+
+        min_d, max_d = _check_point(
+            name=name,
+            panel=panel,
+            fs_=0,
+            ss_=0,
+            min_d=min_d,
+            max_d=max_d,
+            detector=detector
+        )
+
+        min_d, max_d = _check_point(
+            name=name,
+            panel=panel,
+            fs_=panel['w'],
+            ss_=0,
+            min_d=min_d,
+            max_d=max_d,
+            detector=detector
+        )
+
+        min_d, max_d = _check_point(
+            name=name,
+            panel=panel,
+            fs_=0,
+            ss_=panel['h'],
+            min_d=min_d,
+            max_d=max_d,
+            detector=detector
+        )
+
+        min_d, max_d = _check_point(
+            name=name,
+            panel=panel,
+            fs_=panel['w'],
+            ss_=panel['h'],
+            min_d=min_d,
+            max_d=max_d,
+            detector=detector
+        )
 
 
 def load_crystfel_geometry(filename):
-    """Loads a CrystFEL geometry file into a dictionary.
+    """Load a CrystFEL geometry file into a dictionary.
 
-    Reimplements the get_detector_geometry_2 function from CrystFEL amost verbatim. Returns a dictionary with the
-    geometry information. Entries in the geometry file appears as keys in the returned dictionary. For a full
-    documentation on the CrystFEL geometry format, see:
+    Reimplementation of the get_detector_geometry_2 function from CrystFEL
+    almost verbatim. Returns a dictionary with the geometry information.
+    Entries in the geometry file appear as keys in the returned dictionary.
+    For full documentation of the CrystFEL geometry format, see:
 
     http://www.desy.de/~twhite/crystfel/manual-crystfel_geometry.html
 
+    The code of this function is synced with the code of the function
+    'get_detector_geometry_2' in CrystFEL at commit
+    41a8fa9819010fe8ddeb66676fee717f5226c7b8.
+
     Args:
 
-        filename (str): filename of the geometry file
+        filename (str): filename of the geometry file.
 
     Returns:
 
-        detector (dict): dictionary with the geometry loaded from the file
+        detector (dict): dictionary with the geometry loaded from the file.
     """
 
-    fh = open(filename, 'r')
+    fhandle = open(filename, mode='r')
 
     beam = {
         'photon_energy': 0.0,
@@ -354,8 +453,8 @@ def load_crystfel_geometry(filename):
     }
 
     detector = {
-        'panels': OrderedDict(),
-        'bad': OrderedDict(),
+        'panels': collections.OrderedDict(),
+        'bad': collections.OrderedDict(),
         'mask_good': 0,
         'mask_bad': 0,
         'rigid_groups': {},
@@ -382,7 +481,7 @@ def load_crystfel_geometry(filename):
         'clen_for_centering': None,
         'adu_per_eV': None,
         'adu_per_photon': None,
-        'max_adu': float('inf'),
+        'max_adu': float('inf'),
         'mask': None,
         'mask_file': None,
         'satmap': None,
@@ -407,18 +506,20 @@ def load_crystfel_geometry(filename):
 
     default_dim = ['ss', 'fs']
 
-    fhlines = fh.readlines()
+    fhlines = fhandle.readlines()
 
     for line in fhlines:
 
         if line.startswith(';'):
             continue
 
-        line_without_comments = line.strip().split(';')[0]
-        line_items = re.split('([ \t])', line_without_comments)
-        line_items = [item for item in line_items if item not in ('', ' ', '\t')]
+        line_without_comments = line.strip().split(';')[0]
+        line_items = re.split(pattern='([ \t])', string=line_without_comments)
+        line_items = [
+            item for item in line_items if item not in ('', ' ', '\t')
+        ]
 
-        if len(line_items) < 3:
+        if len(line_items) < 3:
             continue
 
         value = ''.join(line_items[2:])
@@ -429,8 +530,14 @@ def load_crystfel_geometry(filename):
         path = re.split('(/)', line_items[0])
         path = [item for item in path if item not in '/']
 
-        if len(path) < 2:
-            _parse_top_level(line_items[0], value, detector, beam, default_panel)
+        if len(path) < 2:
+            _parse_toplevel(
+                key=line_items[0],
+                value=value,
+                detector=detector,
+                beam=beam,
+                panel=default_panel
+            )
             continue
 
         curr_bad = None
@@ -441,7 +548,7 @@ def load_crystfel_geometry(filename):
             if path[0] in detector['bad']:
                 curr_bad = detector['bad'][path[0]]
             else:
-                curr_bad = deepcopy(default_bad_region)
+                curr_bad = copy.deepcopy(default_bad_region)
                 detector['bad'][path[0]] = curr_bad
 
         else:
@@ -449,13 +556,21 @@ def load_crystfel_geometry(filename):
             if path[0] in detector['panels']:
                 curr_panel = detector['panels'][path[0]]
             else:
-                curr_panel = deepcopy(default_panel)
+                curr_panel = copy.deepcopy(default_panel)
                 detector['panels'][path[0]] = curr_panel
 
         if curr_panel is not None:
-            _parse_field_for_panel(path[1], value, curr_panel)
+            _parse_field_for_panel(
+                key=path[1],
+                value=value,
+                panel=curr_panel
+            )
         else:
-            _parse_field_bad(path[1], value, curr_bad)
+            _parse_field_bad(
+                key=path[1],
+                value=value,
+                bad=curr_bad
+            )
 
     if not detector['panels']:
         raise RuntimeError("No panel descriptions in geometry file.")
@@ -473,7 +588,10 @@ def load_crystfel_geometry(filename):
             num_placeholders_in_panels = curr_num_placeholders
         else:
             if curr_num_placeholders != num_placeholders_in_panels:
-                raise RuntimeError('All panels\' data and mask entries must have the same number of placeholders.')
+                raise RuntimeError(
+                    'All panels\' data and mask entries must have the same '
+                    'number of placeholders.'
+                )
 
     num_placeholders_in_masks = None
 
@@ -488,17 +606,23 @@ def load_crystfel_geometry(filename):
             num_placeholders_in_masks = curr_num_placeholders
         else:
             if curr_num_placeholders != num_placeholders_in_masks:
-                raise RuntimeError('All panels\' data and mask entries must have the same number of placeholders.')
+                raise RuntimeError(
+                    'All panels\' data and mask entries must have the same '
+                    'number of placeholders.'
+                )
 
     if num_placeholders_in_masks > num_placeholders_in_panels:
-        raise RuntimeError('Number of placeholders in mask cannot be larget than for data.')
+        raise RuntimeError(
+            'Number of placeholders in mask cannot be larger than the '
+            'number for data.'
+        )
 
     dim_length = None
 
     for panel in detector['panels'].values():
 
         if panel['dim_structure'] is None:
-            panel['dim_structure'] = deepcopy(default_dim)
+            panel['dim_structure'] = copy.deepcopy(default_dim)
 
         found_ss = False
         found_fs = False
@@ -506,61 +630,98 @@ def load_crystfel_geometry(filename):
 
         for entry in panel['dim_structure']:
             if entry is None:
-                raise RuntimeError('Not all dim entries are defined for all panels.')
+                raise RuntimeError(
+                    'Not all dim entries are defined for all panels.'
+                )
             elif entry == 'ss':
                 if found_ss is True:
-                    raise RuntimeError('Only one slow scan dim coordinate is allowed.')
+                    raise RuntimeError(
+                        'Only one slow scan dim coordinate is allowed.'
+                    )
                 else:
                     found_ss = True
             elif entry == 'fs':
                 if found_fs is True:
-                    raise RuntimeError('Only one fast scan dim coordinate is allowed.')
+                    raise RuntimeError(
+                        'Only one fast scan dim coordinate is allowed.'
+                    )
                 else:
                     found_fs = True
             elif entry == '%':
                 if found_placeholder is True:
-                    raise RuntimeError('Only one placeholder dim coordinate is allowed.')
+                    raise RuntimeError(
+                        'Only one placeholder dim coordinate is allowed.'
+                    )
                 else:
                     found_placeholder = True
 
         if dim_length is None:
             dim_length = len(panel['dim_structure'])
         elif dim_length != len(panel['dim_structure']):
-            raise RuntimeError('Number of dim coordinates must be the same for all panels.')
+            raise RuntimeError(
+                'Number of dim coordinates must be the same for all panels.'
+            )
 
         if dim_length == 1:
-            raise RuntimeError('Number of dim coordinates must be at least two.')
+            raise RuntimeError(
+                'Number of dim coordinates must be at least two.'
+            )
 
     for panel in detector['panels'].values():
 
         if panel['origin_min_fs'] < 0:
-            raise RuntimeError('Please specify the minimum fs coordinate for panel {}.'.format(panel['name']))
+            raise RuntimeError(
+                'Please specify the minimum fs coordinate for '
+                'panel {}.'.format(panel['name'])
+            )
 
         if panel['origin_max_fs'] < 0:
-            raise RuntimeError('Please specify the maximum fs coordinate for panel {}.'.format(panel['name']))
+            raise RuntimeError(
+                'Please specify the maximum fs coordinate for '
+                'panel {}.'.format(panel['name'])
+            )
 
         if panel['origin_min_ss'] < 0:
-            raise RuntimeError('Please specify the minimum ss coordinate for panel {}.'.format(panel['name']))
+            raise RuntimeError(
+                'Please specify the minimum ss coordinate for '
+                'panel {}.'.format(panel['name'])
+            )
 
         if panel['origin_max_ss'] < 0:
-            raise RuntimeError('Please specify the maximum ss coordinate for panel {}.'.format(panel['name']))
+            raise RuntimeError(
+                'Please specify the maximum ss coordinate for '
+                'panel {}.'.format(panel['name'])
+            )
 
         if panel['cnx'] is None:
-            raise RuntimeError('Please specify the corner X coordinate for panel {}.'.format(panel['name']))
+            raise RuntimeError(
+                'Please specify the corner X coordinate for '
+                'panel {}.'.format(panel['name'])
+            )
 
         if panel['clen'] is None and panel['clen_from'] is None:
-            raise RuntimeError('Please specify the camera length for panel {}.'.format(panel['name']))
+            raise RuntimeError(
+                'Please specify the camera length for '
+                'panel {}.'.format(panel['name'])
+            )
 
         if panel['res'] < 0:
-            raise RuntimeError('Please specify the resolution or panel {}.'.format(panel['name']))
+            raise RuntimeError(
+                'Please specify the resolution for '
+                'panel {}.'.format(panel['name'])
+            )
 
         if panel['adu_per_eV'] is None and panel['adu_per_photon'] is None:
-            raise RuntimeError('Please specify either adu_per_eV or adu_per_photon for '
-                               'panel {}.'.format(panel['name']))
+            raise RuntimeError(
+                'Please specify either adu_per_eV or adu_per_photon '
+                'for panel {}.'.format(panel['name'])
+            )
 
         if panel['clen_for_centering'] is None and panel['rail_x'] is not None:
-            raise RuntimeError('You must specify clen_for_centering if you specify the rail direction '
-                               '(panel {})'.format(panel['name']))
+            raise RuntimeError(
+                'You must specify clen_for_centering if you specify the '
+                'rail direction (panel {})'.format(panel['name'])
+            )
 
         if panel['rail_x'] is None:
             panel['rail_x'] = 0.0
@@ -575,32 +736,41 @@ def load_crystfel_geometry(filename):
 
     for bad_region in detector['bad'].values():
         if bad_region['is_fsss'] == 99:
-            raise RuntimeError('Please specify the coordinate ranges for bad region {}.'.format(bad_region['name']))
+            raise RuntimeError(
+                'Please specify the coordinate ranges for bad '
+                'region {}.'.format(bad_region['name'])
+            )
 
     for group in detector['rigid_groups']:
         for name in detector['rigid_groups'][group]:
             if name not in detector['panels']:
-                raise RuntimeError('Cannot add panel to rigid_group. Panel not found: {}'.format(name))
+                raise RuntimeError(
+                    'Cannot add panel to rigid_group. Panel not '
+                    'found: {}'.format(name)
+                )
 
     for group_collection in detector['rigid_group_collections']:
         for name in detector['rigid_group_collections'][group_collection]:
             if name not in detector['rigid_groups']:
-                raise RuntimeError('Cannot add rigid_group to collection. Rigid group not found: {}'.format(name))
+                raise RuntimeError(
+                    'Cannot add rigid_group to collection. Rigid group '
+                    'not found: {}'.format(name)
+                )
 
     for panel in detector['panels'].values():
 
-        d = panel['fsx'] * panel['ssy'] - panel['ssx'] * panel['fsy']
+        d__ = panel['fsx'] * panel['ssy'] - panel['ssx'] * panel['fsy']
 
-        if d == 0.0:
+        if d__ == 0.0:
-            raise RuntimeError('Panel {} transformation is singluar.')
+            raise RuntimeError(
+                'Panel {} transformation is singular.'.format(panel['name'])
+            )
 
-        panel['xfs'] = panel['ssy'] / d
-        panel['yfs'] = panel['ssx'] / d
-        panel['xss'] = panel['fsy'] / d
-        panel['yss'] = panel['fsx'] / d
+        panel['xfs'] = panel['ssy'] / d__
+        panel['yfs'] = panel['ssx'] / d__
+        panel['xss'] = panel['fsy'] / d__
+        panel['yss'] = panel['fsx'] / d__
 
     _find_min_max_d(detector)
-    fh.close()
+    fhandle.close()
 
     # The code of this function is synced with the code of the function
     # 'get_detector_geometry_2' in CrystFEL at commit 41a8fa9819010fe8ddeb66676fee717f5226c7b8
diff --git a/geometry_utils.py b/geometry_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea31b2cbb3ac10f84a7e1be07ca92b70a99c459d
--- /dev/null
+++ b/geometry_utils.py
@@ -0,0 +1,232 @@
+#    This file is part of cfelpyutils.
+#
+#    cfelpyutils is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    cfelpyutils is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
+"""
+Utilities for CrystFEL-style geometry files.
+
+This module contains utilities for the processing of CrystFEL-style geometry
+files.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import collections
+
+import numpy
+
+
+PixelMaps = collections.namedtuple('PixelMaps', ['x', 'y', 'r'])
+ImageShape = collections.namedtuple('ImageShape', ['ss', 'fs'])
+
+
+def compute_pixel_maps(geometry):
+    """Create pixel maps from a CrystFEL geometry object.
+
+    Compute pixel maps from a CrystFEL-style geometry object (A dictionary
+    returned by the load_crystfel_geometry function from the crystfel_utils
+    module). The pixel maps can be used to create a representation of
+    the physical layout of the geometry, keeping the origin of the reference
+    system at the beam interaction point.
+
+    Args:
+
+        geometry (dict): A CrystFEL geometry object (A dictionary returned by
+            the load_crystfel_geometry function from the crystfel_utils
+            module).
+
+    Returns:
+
+        tuple: a tuple containing three float32 numpy arrays ('slab'-like pixel
+            maps) with respectively x, y coordinates of the data pixels and
+            distance of each pixel from the center of the reference system.
+    """
+
+    # Determine the max fs and ss in the geometry object.
+    max_slab_fs = numpy.array(
+        [geometry['panels'][k]['max_fs'] for k in geometry['panels']]
+    ).max()
+
+    max_slab_ss = numpy.array(
+        [geometry['panels'][k]['max_ss'] for k in geometry['panels']]
+    ).max()
+
+    # Create the empty arrays that will contain the pixel maps.
+    x_map = numpy.zeros(
+        shape=(max_slab_ss + 1, max_slab_fs + 1),
+        dtype=numpy.float32
+    )
+    y_map = numpy.zeros(
+        shape=(max_slab_ss + 1, max_slab_fs + 1),
+        dtype=numpy.float32
+    )
+
+    # Iterate over the panels.
+    for pan in geometry['panels']:
+
+        # Determine the pixel coordinates for the current panel.
+        i, j = numpy.meshgrid(
+            numpy.arange(
+                geometry['panels'][pan]['max_ss'] -
+                geometry['panels'][pan]['min_ss'] +
+                1
+            ),
+            numpy.arange(
+                geometry['panels'][pan]['max_fs'] -
+                geometry['panels'][pan]['min_fs'] +
+                1
+            ),
+            indexing='ij'
+        )
+
+        # Compute the x,y vectors, using complex notation.
+        d_x = (
+            geometry['panels'][pan]['fsy'] +
+            1J * geometry['panels'][pan]['fsx']
+        )
+        d_y = (
+            geometry['panels'][pan]['ssy'] +
+            1J * geometry['panels'][pan]['ssx']
+        )
+        r_0 = (
+            geometry['panels'][pan]['cny'] +
+            1J * geometry['panels'][pan]['cnx']
+        )
+
+        cmplx = i * d_y + j * d_x + r_0
+
+        # Fill maps that will be returned with the computed
+        # values (x and y maps).
+        y_map[
+            geometry['panels'][pan]['min_ss']:
+            geometry['panels'][pan]['max_ss'] + 1,
+            geometry['panels'][pan]['min_fs']:
+            geometry['panels'][pan]['max_fs'] + 1
+        ] = cmplx.real
+
+        x_map[
+            geometry['panels'][pan]['min_ss']:
+            geometry['panels'][pan]['max_ss'] + 1,
+            geometry['panels'][pan]['min_fs']:
+            geometry['panels'][pan]['max_fs'] + 1
+        ] = cmplx.imag
+
+    # Compute the values for the radius pixel maps that will be returned.
+    r_map = numpy.sqrt(numpy.square(x_map) + numpy.square(y_map))
+
+    # Return the pixel maps as a tuple.
+    return PixelMaps(x_map, y_map, r_map)
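+
+# Usage sketch for compute_pixel_maps (illustrative; 'detector.geom' is a
+# hypothetical file name):
+#
+#     from crystfel_utils import load_crystfel_geometry
+#     geometry = load_crystfel_geometry('detector.geom')
+#     pixel_maps = compute_pixel_maps(geometry)
+#     print(pixel_maps.x.shape, pixel_maps.r.max())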
+
+
+def apply_pixel_maps(data_as_slab, pixel_maps, output_array=None):
+    """Apply geometry in pixel map format to the input data.
+
+    Applies geometry, described by pixel maps, to input data in 'slab' format.
+    Turns a 2d array of pixel values into an array containing a representation
+    of the physical layout of the geometry, keeping the origin of the reference
+    system at the beam interaction point.
+
+    Args:
+
+        data_as_slab (ndarray): the pixel values on which to apply the
+            geometry, in 'slab' format.
+
+        pixel_maps (tuple): pixel maps, as returned by the
+            compute_pixel_maps function in this module.
+
+        output_array (Optional[numpy.ndarray]): array to hold the output.
+            If the array is not provided, one will be generated automatically.
+            Defaults to None (No array provided).
+
+    Returns:
+
+        ndarray: Array with the same dtype as the input data containing a
+            representation of the physical layout of the geometry (i.e.: the
+            geometry information applied to the input data).
+    """
+
+    # If no array was provided, generate one.
+    if output_array is None:
+        output_array = numpy.zeros(
+            shape=data_as_slab.shape,
+            dtype=data_as_slab.dtype
+        )
+
+    # Apply the pixel map geometry to the data.
+    output_array[pixel_maps.y, pixel_maps.x] = data_as_slab.ravel()
+
+    # Return the output array.
+    return output_array
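+
+# Usage sketch for apply_pixel_maps (illustrative; data_as_slab is an assumed
+# detector frame in 'slab' format). The maps used for indexing must hold
+# integer indices, e.g. the flattened maps returned by
+# adjust_pixel_maps_for_pyqtgraph below:
+#
+#     adjusted_maps = adjust_pixel_maps_for_pyqtgraph(pixel_maps)
+#     img_shape = compute_minimum_image_size(pixel_maps)
+#     image = numpy.zeros(shape=img_shape, dtype=data_as_slab.dtype)
+#     image = apply_pixel_maps(data_as_slab, adjusted_maps, output_array=image)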
+
+
+def compute_minimum_image_size(pixel_maps):
+    """
+    Compute the minimum size of an image that can represent the geometry.
+
+    Compute the minimum size of an image that can contain a
+    representation of the geometry described by the pixel maps, assuming
+    that the image is centered at the center of the reference system.
+
+    Args:
+
+        pixel_maps (tuple): pixel maps, as returned by the
+            compute_pixel_maps function in this module.
+
+    Returns:
+
+        tuple: numpy shape object describing the minimum image size.
+    """
+
+    # Recover the x and y pixel maps.
+    x_map, y_map = pixel_maps.x, pixel_maps.y
+
+    # Find the largest absolute values of x and y in the maps.
+    y_largest = 2 * int(max(abs(y_map.max()), abs(y_map.min()))) + 2
+    x_largest = 2 * int(max(abs(x_map.max()), abs(x_map.min()))) + 2
+
+    # Return a tuple with the computed shape.
+    return ImageShape(y_largest, x_largest)
+
+
+def adjust_pixel_maps_for_pyqtgraph(pixel_maps):
+    """
+    Adjust pixel maps for visualization of the data in a pyqtgraph widget.
+
+    Adjust the pixel maps for use in a Pyqtgraph's ImageView widget.
+    Essentially, the origin of the reference system is moved to the
+    top-left of the image.
+
+    Args:
+
+        pixel_maps (tuple): pixel maps, as returned by the
+            compute_pixel_maps function in this module.
+
+    Returns:
+
+        tuple: a three-element tuple containing two integer numpy arrays
+            ('slab'-like pixel maps) with respectively x, y coordinates of the
+            data pixels as the first two elements, and None as the third.
+    """
+
+    # Compute the minimum image shape needed to represent the coordinates.
+    min_ss, min_fs = compute_minimum_image_size(pixel_maps)
+
+    # Convert y,x values to i,j array indices.
+    i = numpy.array(pixel_maps.y, dtype=numpy.int) + min_ss // 2 - 1
+    j = numpy.array(pixel_maps.x, dtype=numpy.int) + min_fs // 2 - 1
+
+    y_map = i.flatten()
+    x_map = j.flatten()
+
+    return PixelMaps(x_map, y_map, None)
diff --git a/cfel_hdf5.py b/hdf5_utils.py
similarity index 73%
rename from cfel_hdf5.py
rename to hdf5_utils.py
index 5daeb79811ff4e1ec76d033d62072c8154577488..eff5e1708ce389b178d6dd5de3719fedced33bf8 100644
--- a/cfel_hdf5.py
+++ b/hdf5_utils.py
@@ -27,24 +27,33 @@ import numpy
 
 
 def load_nparray_from_hdf5_file(data_filename, data_group):
-    """Loads a numpy.ndarray from an HDF5 file.
+    """Load a numpy.ndarray from an HDF5 file.
 
     Args:
 
        data_filename (str): filename of the file to read.
 
-       data_group (str): internal HDF5 path of the data block to read.
+       data_group (str): internal HDF5 path of the data block containing
+       the array to load.
 
     Returns:
 
-       nparray (numpy.ndarray): numpy array with the data read from the file.
+       ndarray: numpy array with the data read from the file.
     """
     try:
 
+        # Open the h5py file, and try to read the data.
         with h5py.File(data_filename, 'r') as hdfile:
             nparray = numpy.array(hdfile[data_group])
             hdfile.close()
     except:
-        raise RuntimeError('Error reading file {0} (data block: {1}).'.format(data_filename, data_group))
+
+        # Raise an exception in case of error.
+        raise RuntimeError(
+            'Error reading file {0} (data block: {1}).'.format(
+                data_filename,
+                data_group
+            )
+        )
     else:
         return nparray
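+
+
+# Usage sketch (illustrative; the file name and internal HDF5 path are
+# hypothetical):
+#
+#     data = load_nparray_from_hdf5_file('run0001.h5', '/entry/data/data')
+#     print(data.shape, data.dtype)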
diff --git a/parameter_utils.py b/parameter_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7d6d7a4968470dedf378d83ec50b1f3e304a12a
--- /dev/null
+++ b/parameter_utils.py
@@ -0,0 +1,179 @@
+#    This file is part of cfelpyutils.
+#
+#    cfelpyutils is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    cfelpyutils is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
+"""
+Utilities for parsing command line options and configuration files.
+
+This module contains utilities for parsing of command line options and
+configuration files.
+"""
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+import ast
+
+
+def _parsing_error(section, option):
+    # Raise an exception after a parsing error.
+
+    raise RuntimeError(
+        'Error parsing parameter {0} in section [{1}]. Make sure that the '
+        'syntax is correct: list elements must be separated by commas and '
+        'dict entries must contain the colon symbol. Strings must be quoted, '
+        'even in lists and dicts.'.format(
+            option,
+            section
+        )
+    )
+
+
+def convert_parameters(config_dict):
+    """Convert strings in parameter dictionaries to the corrent data type.
+
+    Read a parameter dictionary returned by the ConfigParser python module,
+    and assign correct types to the parameters, without changing the structure
+    of the dictionary.
+
+    Try to interpret each entry in the dictionary according to the following
+    rules. The first rule that returns a valid result determines the type to
+    which the entry will be converted.
+
+    - If the entry starts and ends with a single quote or double quote, it is
+      interpreted as a string.
+    - If the entry starts and ends with a square bracket, it is interpreted as
+      a list.
+    - If the entry starts and ends with a brace, it is interpreted as a
+      dictionary.
+    - If the entry is the word None, without quotes, then the entry is
+      interpreted as NoneType.
+    - If the entry is the word False, without quotes, then the entry is
+      interpreted as a boolean False.
+    - If the entry is the word True, without quotes, then the entry is
+      interpreted as a boolean True.
+    - If none of the previous options match the content of the entry,
+      the parser tries to interpret the entry in order as:
+
+        - An integer number.
+        - A float number.
+        - A string.
+
+    Args:
+
+        config_dict (dict): a dictionary containing the parameters as strings
+            (the dictionary as returned by ConfigParser).
+
+    Returns:
+
+        dict: dictionary with the same structure as the input
+            dictionary, but with correct types assigned to each entry.
+    """
+
+    # Create the dictionary that will be returned.
+    monitor_params = {}
+
+    # Iterate over the first level of the configuration dictionary.
+    for section in config_dict.keys():
+
+        # Add the section to the dictionary that will be returned.
+        monitor_params[section] = {}
+
+        # Iterate over the content of each section (the second level in the
+        # configuration dictionary).
+        for option in config_dict[section].keys():
+
+            # Get the option from the dictionary (None is returned if the
+            # dictionary does not contain the option).
+            recovered_option = config_dict[section].get(option)
+
+            # Check if the option is a string delimited by single quotes.
+            if (
+                    recovered_option.startswith("'") and
+                    recovered_option.endswith("'")
+            ):
+                monitor_params[section][option] = recovered_option[1:-1]
+                continue
+
+            # Check if the option is a string delimited by double quotes.
+            if (
+                    recovered_option.startswith('"') and
+                    recovered_option.endswith('"')
+            ):
+                monitor_params[section][option] = recovered_option[1:-1]
+                continue
+
+            # Check if the option is a list. If it is, interpret it using the
+            # literal_eval function.
+            if (
+                    recovered_option.startswith("[") and
+                    recovered_option.endswith("]")
+            ):
+                try:
+                    monitor_params[section][option] = ast.literal_eval(
+                        recovered_option
+                    )
+                    continue
+                except (SyntaxError, ValueError):
+                    _parsing_error(section, option)
+
+            # Check if the option is a dictionary or a set. If it is,
+            # interpret it using the literal_eval function.
+            if (
+                    recovered_option.startswith("{") and
+                    recovered_option.endswith("}")
+            ):
+                try:
+                    monitor_params[section][option] = ast.literal_eval(
+                        recovered_option
+                    )
+                    continue
+                except (SyntaxError, ValueError):
+                    _parsing_error(section, option)
+
+            # Check if the option is the special string 'None' (without
+            # quotes).
+            if recovered_option == 'None':
+                monitor_params[section][option] = None
+                continue
+
+            # Check if the option is the special string 'False' (without
+            # quotes).
+            if recovered_option == 'False':
+                monitor_params[section][option] = False
+                continue
+
+            # Check if the option is the special string 'True' (without
+            # quotes).
+            if recovered_option == 'True':
+                monitor_params[section][option] = True
+                continue
+
+            # Check if the option is an int by trying to convert it to an int.
+            try:
+                monitor_params[section][option] = int(recovered_option)
+                continue
+            except ValueError:
+                # If the conversion to int failed, try to convert it to a float.
+                try:
+                    monitor_params[section][option] = float(
+                        recovered_option
+                    )
+                    continue
+                except ValueError:
+                    # If the conversion to float also failed, return a parsing
+                    # error.
+                    _parsing_error(section, option)
+
+    # Return the converted dictionary.
+    return monitor_params
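+
+
+# Usage sketch (illustrative; 'monitor.ini' is a hypothetical file, read with
+# the standard library configparser module):
+#
+#     import configparser
+#     config = configparser.ConfigParser()
+#     config.read('monitor.ini')
+#     config_dict = {s: dict(config.items(s)) for s in config.sections()}
+#     monitor_params = convert_parameters(config_dict)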
diff --git a/psana_utils.py b/psana_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b963b2317d9024e539d5ec23d41f4b8900000562
--- /dev/null
+++ b/psana_utils.py
@@ -0,0 +1,88 @@
+#    This file is part of cfelpyutils.
+#
+#    cfelpyutils is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, either version 3 of the License, or
+#    (at your option) any later version.
+#
+#    cfelpyutils is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with cfelpyutils.  If not, see <http://www.gnu.org/licenses/>.
+"""
+Utilities for the psana python module.
+
+This module provides utilities that build on the functionality provided by the
+psana python module (developed at the SLAC National Laboratories).
+"""
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+
+
+def first_event_inspection(source):
+    """Inspect the content of the first psana event.
+
+    Takes a psana source string (e.g. exp=CXI/cxix....) and inspects the
+    content of the first event in the data described by the string.
+    Prints information about the content of the event.
+
+    Args:
+
+        source (str): a psana source string (e.g. exp=CXI/cxix....).
+    """
+
+    # Import the psana module.
+    import psana
+
+    # Print the name of the source.
+    print('\n\n')
+    print('data source: {}'.format(source))
+
+    print('\n')
+    print('Opening dataset...')
+
+    # Open the psana data source.
+    dsource = psana.DataSource(source)
+
+    # Print the content of the Epics portion of the data.
+
+    # First the Epics names.
+    print('\n')
+    print('Epics pv Names:')
+    print(dsource.env().epicsStore().pvNames())
+
+    # Then the Epics aliases.
+    print('\n')
+    print('Epics aliases (the not so confusing ones):')
+    print(dsource.env().epicsStore().aliases())
+
+    # Move to the first event in the data.
+    print('\n')
+    print('Event structure:')
+    itr = dsource.events()
+    evt = next(itr)
+
+    # Print the content of the first event.
+    for k in evt.keys():
+        print(
+            'Type: {0}   Source: {1}   Alias: {2}   '
+            'Key: {3}'.format(
+                k.type(),
+                k.src(),
+                k.alias(),
+                k.key()
+            )
+        )
+
+        print('')
+
+    for k in evt.keys():
+        print(k)
+
+    # Extract and print the photon energy.
+    beam = evt.get(psana.Bld.BldDataEBeamV7, psana.Source('BldInfo(EBeam)'))
+    print('Photon energy: %f' % beam.ebeamPhotonEnergy())