Skip to content

Commit cf4b6be

Browse files
committed
Merge sync_timestamps into master with changes made for v1.15.1
A merge commit was necessary because the branch histories had diverged.
2 parents 2378141 + c8fef0e commit cf4b6be

8 files changed

Lines changed: 191 additions & 91 deletions

File tree

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,3 +102,6 @@ venv.bak/
102102

103103
# mypy
104104
.mypy_cache/
105+
106+
# PyCharm
107+
.idea

README.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,10 @@ Python library for importing Extensible Data Format (XDF)
33

44
Python importer for [xdf](https://github.com/sccn/xdf).
55

6+
Install with `pip install pyxdf`.
7+
8+
For the latest version, use `pip install git+https://github.com/xdf-modules/xdf-Python.git`.
9+
610
## For maintainers
711

812
1. For pypi

azure-pipelines.yml

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
# Run tests, build wheel, and if this is a tagged commit
2+
# beginning with v then deploy to pypi (pypi test for now).
3+
4+
trigger:
5+
tags:
6+
include:
7+
- v*
8+
branches:
9+
include:
10+
- master
11+
12+
pool:
13+
vmImage: 'Ubuntu-16.04'
14+
strategy:
15+
matrix:
16+
#Python27:
17+
# python.version: '2.7'
18+
#Python35:
19+
# python.version: '3.5'
20+
Python36:
21+
python.version: '3.6'
22+
#Python37:
23+
# python.version: '3.7'
24+
25+
steps:
26+
- task: UsePythonVersion@0
27+
inputs:
28+
versionSpec: '$(python.version)'
29+
displayName: 'Use Python $(python.version)'
30+
31+
- script: |
32+
python -m pip install --upgrade pip wheel twine setuptools_scm
33+
pip install numpy
34+
displayName: 'Install Dependencies'
35+
36+
- script: |
37+
pip install pytest pytest-azurepipelines
38+
pytest
39+
displayName: 'pytest'
40+
41+
- script: |
42+
python setup.py sdist bdist_wheel
43+
displayName: 'Build Wheel'
44+
45+
- task: TwineAuthenticate@0
46+
condition: and(succeeded(), contains(variables['Build.SourceBranch'], 'refs/tags/'))
47+
inputs:
48+
externalSources: '5ba26794-ed5b-43b0-8414-7bb1416ec0ad' # -r pypi
49+
# externalSources: '025cb3c4-642b-4fac-829c-fe8634f1a504' # -r testpypi
50+
51+
- script: 'python -m twine upload -r pypi --config-file $(PYPIRC_PATH) --skip-existing dist/*'
52+
condition: and(succeeded(), contains(variables['Build.SourceBranch'], 'refs/tags/'))
53+
displayName: 'Deploy to PyPI'
54+

pyxdf/__init__.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1,8 @@
1-
from .pyxdf import load_xdf
1+
from pkg_resources import get_distribution, DistributionNotFound
2+
try:
3+
__version__ = get_distribution(__name__).version
4+
except DistributionNotFound:
5+
# package is not installed
6+
__version__ = None
7+
from .pyxdf import load_xdf
8+
Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,14 @@
11
import os
22
import logging
33
import pyxdf
4+
import sys
45

56

67
logging.basicConfig(level=logging.DEBUG) # Use logging.INFO to reduce output.
7-
fname = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'xdf_sample.xdf'))
8+
if len(sys.argv) > 1:
9+
fname = sys.argv[1]
10+
else:
11+
fname = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'xdf_sample.xdf'))
812
streams, fileheader = pyxdf.load_xdf(fname)
913

1014
print("Found {} streams:".format(len(streams)))

pyxdf/pyxdf.py

Lines changed: 100 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,55 @@
1212
import xml.etree.ElementTree as ET
1313
from collections import OrderedDict, defaultdict
1414
import logging
15+
from pathlib import Path
1516

1617
import numpy as np
1718

1819
__all__ = ['load_xdf']
19-
__version__ = '1.14.0'
2020

2121
logger = logging.getLogger(__name__)
2222

2323

24+
class StreamData:
25+
"""Temporary per-stream data."""
26+
27+
def __init__(self, xml):
28+
"""Init a new StreamData object from a stream header."""
29+
fmts = dict([
30+
('double64', np.float64),
31+
('float32', np.float32),
32+
('string', np.object),
33+
('int32', np.int32),
34+
('int16', np.int16),
35+
('int8', np.int8),
36+
('int64', np.int64)
37+
])
38+
# number of channels
39+
self.nchns = int(xml['info']['channel_count'][0])
40+
# nominal sampling rate in Hz
41+
self.srate = round(float(xml['info']['nominal_srate'][0]))
42+
# format string (int8, int16, int32, float32, double64, string)
43+
self.fmt = xml['info']['channel_format'][0]
44+
# list of time-stamp chunks (each an ndarray, in seconds)
45+
self.time_stamps = []
46+
# list of time-series chunks (each an ndarray or list of lists)
47+
self.time_series = []
48+
# list of clock offset measurement times (in seconds)
49+
self.clock_times = []
50+
# list of clock offset measurement values (in seconds)
51+
self.clock_values = []
52+
# last observed time stamp, for delta decompression
53+
self.last_timestamp = 0.0
54+
# nominal sampling interval, in seconds, for delta decompression
55+
self.tdiff = 1.0 / self.srate if self.srate > 0 else 0.0
56+
self.effective_srate = 0.0
57+
# pre-calc some parsing parameters for efficiency
58+
if self.fmt != 'string':
59+
self.dtype = np.dtype(fmts[self.fmt])
60+
# number of bytes to read from stream to handle one sample
61+
self.samplebytes = self.nchns * self.dtype.itemsize
62+
63+
2464
def load_xdf(filename,
2565
on_chunk=None,
2666
synchronize_clocks=True,
@@ -189,39 +229,6 @@ def load_xdf(filename,
189229
190230
"""
191231

192-
class StreamData:
193-
"""Temporary per-stream data."""
194-
def __init__(self, xml):
195-
"""Init a new StreamData object from a stream header."""
196-
fmt2char = {'int8': 'b', 'int16': 'h', 'int32': 'i', 'int64': 'q',
197-
'float32': 'f', 'double64': 'd'}
198-
fmt2nbytes = {'int8': 1, 'int16': 2, 'int32': 4, 'int64': 8,
199-
'float32': 4, 'double64': 8}
200-
# number of channels
201-
self.nchns = int(xml['info']['channel_count'][0])
202-
# nominal sampling rate in Hz
203-
self.srate = round(float(xml['info']['nominal_srate'][0]))
204-
# format string (int8, int16, int32, float32, double64, string)
205-
self.fmt = xml['info']['channel_format'][0]
206-
# list of time-stamp chunks (each an ndarray, in seconds)
207-
self.time_stamps = []
208-
# list of time-series chunks (each an ndarray or list of lists)
209-
self.time_series = []
210-
# list of clock offset measurement times (in seconds)
211-
self.clock_times = []
212-
# list of clock offset measurement values (in seconds)
213-
self.clock_values = []
214-
# last observed time stamp, for delta decompression
215-
self.last_timestamp = 0.0
216-
# nominal sampling interval, in seconds, for delta decompression
217-
self.tdiff = 1.0 / self.srate if self.srate > 0 else 0.0
218-
# pre-calc some parsing parameters for efficiency
219-
if self.fmt != 'string':
220-
# number of bytes to read from stream to handle one sample
221-
self.samplebytes = self.nchns * fmt2nbytes[self.fmt]
222-
# format string to pass to struct.unpack() to handle one sample
223-
self.structfmt = '<%s%s' % (self.nchns, fmt2char[self.fmt])
224-
225232
logger.info('Importing XDF file %s...' % filename)
226233
if not os.path.exists(filename):
227234
raise Exception('file %s does not exist.' % filename)
@@ -236,8 +243,13 @@ def __init__(self, xml):
236243
filesize = os.path.getsize(filename)
237244

238245
# read file contents ([SomeText] below refers to items in the XDF Spec)
239-
with gzip.GzipFile(filename, 'rb') if filename.endswith('.xdfz') else open(filename, 'rb') as f:
246+
filename = Path(filename) # convert to pathlib object
247+
if filename.suffix == '.xdfz' or filename.suffixes == ['.xdf', '.gz']:
248+
f_open = gzip.open
249+
else:
250+
f_open = open
240251

252+
with f_open(filename, 'rb') as f:
241253
# read [MagicCode]
242254
if f.read(4) != b'XDF:':
243255
raise Exception('not a valid XDF file: %s' % filename)
@@ -288,48 +300,14 @@ def __init__(self, xml):
288300
# read [Samples] chunk...
289301
# noinspection PyBroadException
290302
try:
291-
# read [NumSampleBytes], [NumSamples]
292-
nsamples = _read_varlen_int(f)
293-
# allocate space
294-
stamps = np.zeros((nsamples,))
295-
if temp[StreamId].fmt == 'string':
296-
# read a sample comprised of strings
297-
values = [[None] * temp[StreamId].nchns
298-
for _ in range(nsamples)]
299-
# for each sample...
300-
for k in range(nsamples):
301-
# read or deduce time stamp
302-
if struct.unpack('B', f.read(1))[0]:
303-
stamps[k] = struct.unpack('<d', f.read(8))[0]
304-
else:
305-
stamps[k] = (temp[StreamId].last_timestamp +
306-
temp[StreamId].tdiff)
307-
temp[StreamId].last_timestamp = stamps[k]
308-
# read the values
309-
for ch in range(temp[StreamId].nchns):
310-
raw = f.read(_read_varlen_int(f))
311-
values[k][ch] = raw.decode(errors='replace')
312-
else:
313-
# read a sample comprised of numeric values
314-
values = np.zeros((nsamples, temp[StreamId].nchns))
315-
# for each sample...
316-
for k in range(nsamples):
317-
# read or deduce time stamp
318-
if struct.unpack('B', f.read(1))[0]:
319-
stamps[k] = struct.unpack('<d', f.read(8))[0]
320-
else:
321-
stamps[k] = (temp[StreamId].last_timestamp +
322-
temp[StreamId].tdiff)
323-
temp[StreamId].last_timestamp = stamps[k]
324-
# read the values
325-
raw = f.read(temp[StreamId].samplebytes)
326-
values[k, :] = struct.unpack(temp[StreamId].structfmt, raw)
303+
nsamples, stamps, values = _read_chunk3(f, temp[StreamId])
304+
327305
logger.debug(' reading [%s,%s]' % (temp[StreamId].nchns,
328306
nsamples))
329307
# optionally send through the on_chunk function
330308
if on_chunk is not None:
331309
values, stamps, streams[StreamId] = on_chunk(values, stamps,
332-
streams[StreamId], s)
310+
streams[StreamId], StreamId)
333311
# append to the time series...
334312
temp[StreamId].time_series.append(values)
335313
temp[StreamId].time_stamps.append(stamps)
@@ -409,19 +387,59 @@ def __init__(self, xml):
409387

410388

411389
streams = [s for s in streams.values()]
412-
sort_data = [s['info']['name'][0] for s in streams]
413-
streams = [x for _, x in sorted(zip(sort_data, streams))]
414390
return streams, fileheader
415391

416392

393+
def _read_chunk3(f, s):
394+
# read [NumSampleBytes], [NumSamples]
395+
nsamples = _read_varlen_int(f)
396+
# allocate space
397+
stamps = np.zeros((nsamples,))
398+
if s.fmt == 'string':
399+
# read a sample comprised of strings
400+
values = [[None] * s.nchns
401+
for _ in range(nsamples)]
402+
# for each sample...
403+
for k in range(nsamples):
404+
# read or deduce time stamp
405+
if f.read(1) != b'\x00':
406+
stamps[k] = struct.unpack('<d', f.read(8))[0]
407+
else:
408+
stamps[k] = (s.last_timestamp + s.tdiff)
409+
s.last_timestamp = stamps[k]
410+
# read the values
411+
for ch in range(s.nchns):
412+
raw = f.read(_read_varlen_int(f))
413+
values[k][ch] = raw.decode(errors='replace')
414+
else:
415+
# read a sample comprised of numeric values
416+
values = np.zeros((nsamples, s.nchns), dtype=s.dtype)
417+
# for each sample...
418+
for k in range(values.shape[0]):
419+
# read or deduce time stamp
420+
if f.read(1) != b'\x00':
421+
stamps[k] = struct.unpack('<d', f.read(8))[0]
422+
else:
423+
stamps[k] = s.last_timestamp + s.tdiff
424+
s.last_timestamp = stamps[k]
425+
# read the values
426+
raw = f.read(s.nchns * values.dtype.itemsize)
427+
# no fromfile(), see
428+
# https://github.com/numpy/numpy/issues/13319
429+
values[k, :] = np.frombuffer(raw,
430+
dtype=s.dtype,
431+
count=s.nchns)
432+
return nsamples, stamps, values
433+
434+
417435
def _read_varlen_int(f):
418436
"""Read a variable-length integer."""
419-
nbytes = struct.unpack('B', f.read(1))[0]
420-
if nbytes == 1:
421-
return struct.unpack('B', f.read(1))[0]
422-
elif nbytes == 4:
437+
nbytes = f.read(1)
438+
if nbytes == b'\x01':
439+
return ord(f.read(1))
440+
elif nbytes == b'\x04':
423441
return struct.unpack('<I', f.read(4))[0]
424-
elif nbytes == 8:
442+
elif nbytes == b'\x08':
425443
return struct.unpack('<Q', f.read(8))[0]
426444
else:
427445
raise RuntimeError('invalid variable-length integer encountered.')
@@ -437,8 +455,7 @@ def _xml2dict(t):
437455

438456

439457
def _scan_forward(f):
440-
"""Scan forward through the given file object until after the next
441-
boundary chunk."""
458+
"""Scan forward through file object until after the next boundary chunk."""
442459
blocklen = 2**20
443460
signature = bytes([0x43, 0xA5, 0x46, 0xDC, 0xCB, 0xF5, 0x41, 0x0F,
444461
0xB3, 0x0E, 0xD5, 0x46, 0x73, 0x83, 0xCB, 0xE4])
@@ -447,7 +464,7 @@ def _scan_forward(f):
447464
block = f.read(blocklen)
448465
matchpos = block.find(signature)
449466
if matchpos != -1:
450-
f.seek(curpos + matchpos + 15)
467+
f.seek(curpos + matchpos + len(signature))
451468
logger.debug(' scan forward found a boundary chunk.')
452469
break
453470
if len(block) < blocklen:
@@ -572,7 +589,7 @@ def _jitter_removal(streams,
572589
indices = np.arange(range_i[0], range_i[1] + 1, 1)[:, None]
573590
X = np.concatenate((np.ones_like(indices), indices), axis=1)
574591
y = stream.time_stamps[indices]
575-
mapping = np.linalg.lstsq(X, y, rcond=None)[0]
592+
mapping = np.linalg.lstsq(X, y, rcond=-1)[0]
576593
stream.time_stamps[indices] = (mapping[0] + mapping[1] *
577594
indices)
578595
# Store num_samples and segment duration

pyxdf/test/test_library_basic.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import pyxdf
2+
import pytest
3+
4+
5+
#%% test
6+
def test_load_xdf_present():
7+
"""
8+
Check that pyxdf has the all important load_xdf.
9+
This is nothing more than a placeholder so the CI system has a test to pass.
10+
"""
11+
assert(hasattr(pyxdf, 'load_xdf'))
12+

0 commit comments

Comments
 (0)