"""Write EXDF files based on EXtra-data SourceData objects.
Building on the EXDF DataFile API, SourceDataWriter builds
EXDf-structured HDF5 files based on a collection of EXtra-data
SourceData objects. It bypasses the DataCollection interface used in
extra_data.write to allow for more flexibility with data selection and
stricter validation when combining data from different locations.
"""
from functools import reduce
from itertools import accumulate
from logging import getLogger
from operator import or_
from os.path import basename
from time import perf_counter
import numpy as np
from extra_data import FileAccess
from .datafile import DataFile, get_pulse_offsets
log = getLogger('exdf.write.SourceDataWriter')
class SourceDataWriter:
def get_data_format_version(self):
"""Determine the data format version."""
return '1.3'
def with_origin(self):
"""Determine whether to write INDEX/origin data."""
return True
def with_attrs(self):
"""Determine whether to write key attributes."""
return True
def create_instrument_key(self, source, key, orig_dset, kwargs):
"""Determine creation arguments for INSTRUMENT key.
Args:
source (str): Source name.
key (str): Key name.
orig_dset (h5py.Dataset): Original dataset sample.
kwargs (dict of Any): Keyword arguments passed to
h5py.Group.create_dataset.
(dict of Any): Chunk size to use for output.
return kwargs
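    # A hedged sketch of overriding this hook in a subclass, e.g. to compress
    # large INSTRUMENT datasets; the subclass name and size threshold are made
    # up for illustration:
    #
    #     class CompressingWriter(SourceDataWriter):
    #         def create_instrument_key(self, source, key, orig_dset, kwargs):
    #             if np.prod(kwargs['shape']) > 2**20:
    #                 kwargs['compression'] = 'gzip'
    #             return kwargs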
def mask_instrument_data(self, source, index_group, train_ids, counts):
"""Mask INSTRUMENT data.
        Each mask array must have the same length as the original data
        count of its respective train.
Args:
source (str): Source name.
index_group (str): Index group.
train_ids (ndarray): Train IDs.
counts (ndarray): Data counts per train.
Returns:
(Iterable of ndarray): Boolean masks for each passed
train ID and equal in length to respective data counts,
or None to perform no masking.
"""
return
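    # A minimal sketch of a masking override, assuming one only wants to keep
    # the first entry of every train (purely illustrative):
    #
    #     class FirstEntryWriter(SourceDataWriter):
    #         def mask_instrument_data(self, source, index_group, train_ids,
    #                                  counts):
    #             return [np.arange(count) == 0 for count in counts]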
def copy_instrument_data(self, source, key, dest, train_ids, data):
"""Copy INSTRUMENT data from input to output.
The destination dataset is guaranteed to align with the shape of
train_ids and data.
Args:
source (str): Source name.
key (str): Key name.
            dest (h5py.Dataset): Destination dataset.
train_ids (ndarray): Train ID coordinates.
data (ndarray): Source data.
Returns:
None
"""
dest[:] = data
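    # A sketch of a custom copy hook writing in fixed-size slabs, in the
    # spirit of the chunked copying noted in the TODO in write_instrument()
    # below; the slab size is arbitrary:
    #
    #     class SlabCopyWriter(SourceDataWriter):
    #         def copy_instrument_data(self, source, key, dest, train_ids,
    #                                  data):
    #             for start in range(0, len(data), 1024):
    #                 dest[start:start+1024] = data[start:start+1024]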
def write_sequence(self, output_path, sources, sequence=0):
"""Write iterable of SourceData to file.
        Any train or key selection of each SourceData object is taken
        into account.
Args:
output_path (Path, str): Path to write to.
sources (Iterable of extra_data.SourceData): Data sources.
sequence (int, optional): Sequence number, 0 by default.
Returns:
None
"""
        start = perf_counter()

        with DataFile(output_path, 'w', driver='core') as f:
            after_open = perf_counter()

            self.write_base(f, sources, sequence)
            after_base = perf_counter()

            self.write_control(
                f, [sd for sd in sources if sd.is_control])
            after_control = perf_counter()

            self.write_instrument(
                f, [sd for sd in sources if sd.is_instrument])
            after_instrument = perf_counter()

        after_close = perf_counter()
log.debug('Sequence {} written (total={:.3g}, open={:.3g}, '
'base={:.3g}, control={:.3g}, instrument={:.3g}, '
'close={:.3g})'.format(
basename(output_path),
after_close - start, after_open - start,
after_base - after_open, after_control - after_base,
after_instrument - after_control, after_close - after_instrument))
def write_base(self, f, sources, sequence):
"""Write METADATA, INDEX and source groups.
Args:
f (exdf.DataFile): Output file.
sources (Iterable of extra_data.SourceData): Data sources.
sequence (int, optional): Sequence number, 0 by default.
Returns:
None
"""
train_ids, *index_dsets = get_index_root_data(sources)
control_indices, instrument_indices = build_sources_index(sources)
f.create_metadata(
like=sources[0],
sequence=sequence,
data_format_version=self.get_data_format_version(),
control_sources=control_indices.keys(),
instrument_channels=[
f'{source}/{index_group}'
for source, index_group_counts in instrument_indices.items()
for index_group in index_group_counts.keys()])
f.create_dataset('METADATA/dataWriter', data=b'exdf-tools', shape=(1,))
if not self.with_origin():
index_dsets = (*index_dsets[:-1], None)
f.create_index(train_ids, *index_dsets)
for source, counts in control_indices.items():
control_src = f.create_control_source(source)
control_src.create_index(len(train_ids), per_train=True)
for source, index_group_counts in instrument_indices.items():
# May be overwritten later as a result of masking.
instrument_src = f.create_instrument_source(source)
instrument_src.create_index(**index_group_counts)
def write_control(self, f, sources):
"""Write CONTROL and RUN data.
This method assumes the source datasets already exist.
Args:
f (exdf.DataFile): Output file.
sources (Iterable of extra_data.SourceData): Data sources,
should not contain instrument sources.
Returns:
None
"""
for sd in sources:
h5source = f.source[sd.source]
attrs = get_key_attributes(sd) if self.with_attrs() else {}
run_data_leafs = {}
for path, value in sd.run_values().items():
key = path[:path.rfind('.')]
leaf = path[path.rfind('.')+1:]
run_data_leafs.setdefault(key, {})[leaf] = value
# Write CONTROL keys and their RUN keys.
for key in sd.keys(False):
run_entry = run_data_leafs.pop(key, False)
if run_entry:
run_entry = (run_entry['value'], run_entry['timestamp'])
ctrl_values = sd[f'{key}.value'].ndarray()
ctrl_timestamps = sd[f'{key}.timestamp'].ndarray()
                h5source.create_key(
                    key, values=ctrl_values, timestamps=ctrl_timestamps,
                    run_entry=run_entry, attrs=attrs.pop(key, None))
# Write remaining RUN-only keys.
for key, leafs in run_data_leafs.items():
h5source.create_run_key(
key, **leafs, attrs=attrs.pop(key, None))
# Fill in the missing attributes for nodes.
            for path, node_attrs in attrs.items():
                h5source.run_key[path].attrs.update(node_attrs)
                h5source.key[path].attrs.update(node_attrs)
def write_instrument(self, f, sources):
"""Write INSTRUMENT data.
This method assumes the INDEX and source datasets already exist.
Args:
f (exdf.DataFile): Output file.
sources (Iterable of extra_data.SourceData): Data sources,
should not contain control sources.
Returns:
None
"""
file_base = basename(f.filename)
# Must be re-read at this point, as additional trains could have
# been introduced in this sequence.
train_ids = np.array(f['INDEX/trainId'])
masks = {}
for sd in sources:
attrs = get_key_attributes(sd) if self.with_attrs() else {}
h5source = f.source[sd.source]
keys = sd.keys()
for index_group in sd.index_groups:
# Must be re-read same as train IDs.
h5index = f[f'INDEX/{sd.source}/{index_group}']
counts = np.array(h5index['count'])
# Obtain mask for this index group.
masks_by_train = self.mask_instrument_data(
sd.source, index_group, train_ids, counts)
if masks_by_train is not None:
masks[index_group] = mask_index(
h5index, counts, masks_by_train)
num_entries = masks[index_group].sum()
else:
num_entries = counts.sum()
for key in iter_index_group_keys(keys, index_group):
kd = sd[key]
shape = (num_entries, *kd.entry_shape)
orig_dset = kd.files[0].file[kd.hdf5_data_path]
kwargs = {
'shape': shape, 'maxshape': (None,) + shape[1:],
'chunks': orig_dset.chunks, 'dtype': kd.dtype,
'attrs': attrs.pop(key, None)}
h5source.create_key(key, **self.create_instrument_key(
sd.source, key, orig_dset, kwargs))
# Update tableSize to the correct number of records.
h5source[index_group].attrs['tableSize'] = num_entries
            for path, node_attrs in attrs.items():
                h5source.key[path].attrs.update(node_attrs)
# Copy INSTRUMENT data.
for sd in sources:
            h5source = f.source[sd.source]
            keys = sd.keys()
            start_source = perf_counter()
for index_group in sd.index_groups:
mask = masks.get(index_group, np.s_[:])
for key in iter_index_group_keys(keys, index_group):
# TODO: Copy by chunk / file if too large
start_key = perf_counter()
full_data = sd[key].ndarray()
after_read = perf_counter()
masked_data = full_data[mask]
after_mask = perf_counter()
self.copy_instrument_data(
sd.source, key, h5source.key[key],
sd[key].train_id_coordinates()[mask],
masked_data)
after_copy = perf_counter()
log.debug('INSTRUMENT/{}/{} written to {} (total={:.3g}, '
'read={:.3g} @ {:.3g}M, mask={:.3g}, '
'write={:.3g} @ {:.3g}M)'.format(
sd.source, key, file_base,
after_copy - start_key, after_read - start_key,
full_data.nbytes / 2**20 / (after_read - start_key),
after_mask - after_read, after_copy - after_mask,
masked_data.nbytes / 2**20 / (after_copy - after_mask)))
after_source = perf_counter()
log.debug('INSTRUMENT/{} written to {} (total={:.3g})'.format(
sd.source, file_base, after_source - start_source))
def get_index_root_data(sources):
"""Get train ID, timestamp, flag and origin data."""
# Collect train IDs for this sequence.
train_ids = np.zeros(0, dtype=np.uint64)
for sd in sources:
train_ids = np.union1d(train_ids, sd.train_ids)
# Collect input files by index keys (source / index_group).
files_by_index_keys = {}
for sd in sources:
for key in sd.keys():
kd = sd[key]
index_key = f'{sd.source}/{kd.index_group}'
if index_key not in files_by_index_keys:
files_by_index_keys[index_key] = set(kd.source_file_paths)
inp_files = reduce(or_, files_by_index_keys.values(), set())
# {trainId: ({item: timestamp}, {item: flag}, {item: origin})
root_data = {}
for inp_file in inp_files:
fa = FileAccess(inp_file)
sel_trains, sel_rows, _ = np.intersect1d(fa.train_ids, train_ids,
return_indices=True)
if 'INDEX/timestamp' in fa.file:
sel_timestamps = np.array(fa.file['INDEX/timestamp'][sel_rows])
else:
sel_timestamps = np.zeros_like(sel_trains, dtype=np.uint64)
# Missing INDEX/flag dataset handled on the EXtra-data side.
sel_flag = fa.validity_flag[sel_rows]
if 'INDEX/origin' in fa.file:
sel_origin = np.array(fa.file['INDEX/origin'][sel_rows])
else:
sel_origin = np.ones_like(sel_trains, dtype=np.int32)
sel_origin[sel_flag] = 0
item = (fa.data_category, fa.aggregator)
# Collect timestamp, flag and origin as a function of train ID
# and item while ensuring there is no mismatch for the same
# train ID and item.
for train_id, timestamp, flag, origin in zip(
sel_trains, sel_timestamps, sel_flag, sel_origin
):
new_entry = (timestamp, flag, origin)
old_entry = root_data \
.setdefault(train_id, dict()) \
.setdefault(item, new_entry)
if old_entry != new_entry:
raise ValueError(f'INDEX root data mismatch for item {item}: '
f'{new_entry} (from train {train_id}) vs '
f'{old_entry}')
if not np.array_equal(sorted(root_data.keys()), train_ids):
# This is not supposed to happen.
raise ValueError('input files missing index data for selected trains')
timestamps = []
flags = []
origins = []
# Decide which value to choose for each train.
for train_id, entries in root_data.items():
values, counts = np.unique(
np.fromiter(entries.values(), dtype=object, count=len(entries)),
return_counts=True)
if len(values) > 1:
values_str = ', '.join([f'{c}x {v}' for v, c
in zip(values, counts)])
            log.warning(f'INDEX root data mismatch across items on train '
                        f'{train_id}: {values_str}')
entry = values[counts.argmax()]
timestamps.append(entry[0])
flags.append(entry[1])
origins.append(entry[2])
return train_ids, np.array(timestamps), np.array(flags), np.array(origins)
def build_sources_index(sources):
"""Build data count and offsets."""
# Build sources and indices.
control_indices = {}
instrument_indices = {}
for sd in sources:
if sd.is_control:
control_indices[sd.source] = sd.data_counts(labelled=False)
else:
instrument_indices[sd.source] = {
grp: sd.data_counts(labelled=False, index_group=grp)
                for grp in sd.index_groups}
return control_indices, instrument_indices
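# For illustration, the returned mappings have roughly the following shape;
# the source and index group names are made up:
#
#     control_indices = {
#         'SA1_XTD2_XGM/XGM/DOOCS': np.array([1, 1, 1])}
#     instrument_indices = {
#         'SA1_XTD2_XGM/XGM/DOOCS:output': {
#             'data': np.array([1, 1, 1])}}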
def get_key_attributes(sd):
source_attrs = dict()
def build_path(a, b):
return f'{a}.{b}'
for key in sd.keys(inc_timestamps=False):
paths = accumulate(key.split('.'), func=build_path)
for path in paths:
if path in source_attrs:
# Skip this path, already parent of some other key.
continue
for source_file in sd[key].source_file_paths:
fa = FileAccess(source_file)
hdf_path = f'{sd.section}/{sd.source}/{path.replace(".", "/")}'
path_attrs = dict(fa.file[hdf_path].attrs)
existing_attrs = source_attrs.setdefault(path, path_attrs)
if existing_attrs is not path_attrs:
same_keys = existing_attrs.keys() == path_attrs.keys()
same_values = all([
(
np.array_equal(v1, v2)
if isinstance(v1, np.ndarray)
else v1 == v2
)
for (k, v1), v2
in zip(existing_attrs.items(), path_attrs.values())
if k != 'tableSize'])
if not same_keys or not same_values:
log.debug(f'Attributes for {sd.source}.{path} in '
f'{source_file} are {path_attrs}, but got '
f'{source_attrs[path]} from previous file')
raise ValueError(f'attribute mismatch on '
f'{sd.source}.{path}')
return source_attrs
def iter_index_group_keys(keys, index_group):
for key in keys:
if key[:key.index('.')] == index_group:
yield key
def mask_index(g, counts, masks_by_train):
full_mask = np.concatenate(masks_by_train)
num_entries = counts.sum()
assert len(full_mask) == num_entries, \
'incompatible INSTRUMENT mask shape'
# Modify INDEX entry if necessary.
if full_mask.sum() != num_entries:
        g.create_dataset(
            'original/first', data=get_pulse_offsets(counts))
        g.create_dataset(
            'original/count', data=counts)
        g.create_dataset(
            # 'original/position' is assumed as the dataset holding the
            # positions of retained entries within each original train.
            'original/position',
            data=np.concatenate([np.flatnonzero(mask)
                                 for mask in masks_by_train]))
# Compute new data counts.
counts = [mask.sum() for mask in masks_by_train]
g['first'][:] = get_pulse_offsets(counts)
g['count'][:] = counts
return full_mask
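# A worked sketch of the INDEX bookkeeping done in mask_index(), assuming
# get_pulse_offsets() turns per-train counts into cumulative start offsets:
#
#     counts = np.array([3, 0, 2])
#     get_pulse_offsets(counts)  # -> array([0, 3, 3])
#
#     # Keeping only the first entry of each non-empty train yields new
#     # counts [1, 0, 1] and offsets [0, 1, 1].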