
Commit daea9ca: Format with ruff
Parent: 7e21a4d

15 files changed
Lines changed: 690 additions & 544 deletions

src/sdf/__init__.py

Lines changed: 45 additions & 27 deletions
@@ -7,14 +7,14 @@
 
 from . import hdf5
 
-__version__ = '0.3.6'
+__version__ = "0.3.6"
 
-_object_name_pattern = re.compile('[a-zA-Z][a-zA-Z0-9_]*')
+_object_name_pattern = re.compile("[a-zA-Z][a-zA-Z0-9_]*")
 
 
 @define(eq=False)
 class Group:
-    """ SDF Group """
+    """SDF Group"""
 
     name: str = None
     comment: str = None
@@ -41,7 +41,7 @@ def __iter__(self):
 
 @define(eq=False)
 class Dataset:
-    """ SDF Dataset """
+    """SDF Dataset"""
 
     name: str = None
     comment: str = None
@@ -87,7 +87,7 @@ def d(self):
 
 
 def validate(obj: Group | Dataset) -> list[str]:
-    """ Validate an sdf.Group or sdf.Dataset """
+    """Validate an sdf.Group or sdf.Dataset"""
 
     problems = []
 
@@ -102,11 +102,12 @@ def validate(obj: Group | Dataset) -> list[str]:
 
 
 def _validate_group(group, is_root=False):
-
     problems = []
 
     if not is_root and not _object_name_pattern.match(group.name):
-        problems.append("Object names must only contain letters, digits, and underscores (\"_\") and must start with a letter.")
+        problems.append(
+            'Object names must only contain letters, digits, and underscores ("_") and must start with a letter.'
+        )
 
     for child_group in group.groups:
         problems += _validate_dataset(child_group)
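Worth flagging while reading this hunk: the loop over `group.groups` calls `_validate_dataset` on each child group. ruff only reformats, so the call is preserved as is, but recursion into subgroups was presumably intended, along these lines:

    # hypothetical correction, not part of this commit
    for child_group in group.groups:
        problems += _validate_group(child_group)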
@@ -118,62 +119,79 @@ def _validate_group(group, is_root=False):
 
 
 def _validate_dataset(ds: Dataset) -> list[str]:
-
     if type(ds.data) is not np.ndarray:
-        return ['Dataset.data must be a numpy.ndarray']
+        return ["Dataset.data must be a numpy.ndarray"]
 
     elif ds.data.size < 1:
-        return ['Dataset.data must not be empty']
+        return ["Dataset.data must not be empty"]
 
     elif not np.issubdtype(ds.data.dtype, np.float64):
-        return ['Dataset.data.dtype must be numpy.float64']
+        return ["Dataset.data.dtype must be numpy.float64"]
 
     if ds.is_scale:
         if len(ds.data.shape) != 1:
-            return ['Scales must be one-dimensional']
+            return ["Scales must be one-dimensional"]
         if np.any(np.diff(ds.data) <= 0):
-            return ['Scales must be strictly monotonic increasing']
+            return ["Scales must be strictly monotonic increasing"]
     else:
-        if (len(ds.data.shape) >= 1) and (ds.data.shape[0] > 0) and not (len(ds.data.shape) == len(ds.scales)):
-            return ['The number of scales does not match the number of dimensions']
+        if (
+            (len(ds.data.shape) >= 1)
+            and (ds.data.shape[0] > 0)
+            and not (len(ds.data.shape) == len(ds.scales))
+        ):
+            return ["The number of scales does not match the number of dimensions"]
 
     return []
 
 
-def load(filename: str | PathLike, objectname: str = '/', unit: str = None, scale_units: list[str] = None) -> Dataset | Group:
-    """ Load a Dataset or Group from an SDF file """
+def load(
+    filename: str | PathLike,
+    objectname: str = "/",
+    unit: str = None,
+    scale_units: list[str] = None,
+) -> Dataset | Group:
+    """Load a Dataset or Group from an SDF file"""
 
-    if filename.endswith('.mat'):
+    if filename.endswith(".mat"):
         from . import dsres
+
         obj = dsres.load(filename, objectname)
     else:
         obj = hdf5.load(filename, objectname)
 
     if isinstance(obj, Dataset):
-
         # check the unit
         if unit is not None and unit != obj.unit:
-            raise Exception("Dataset '%s' has the wrong unit. Expected '%s' but was '%s'." % (obj.name, unit, obj.unit))
+            raise Exception(
+                "Dataset '%s' has the wrong unit. Expected '%s' but was '%s'."
+                % (obj.name, unit, obj.unit)
+            )
 
         # check the number of the scale units
         if scale_units is not None:
-
             if len(scale_units) != obj.data.ndim:
-                raise Exception("The number of scale units must be equal to the number of dimensions. " +
-                                "Dataset '%s' has %d dimension(s) but %d scale units where given."
-                                % (obj.name, obj.data.ndim, len(scale_units)))
+                raise Exception(
+                    "The number of scale units must be equal to the number of dimensions. "
+                    + "Dataset '%s' has %d dimension(s) but %d scale units where given."
+                    % (obj.name, obj.data.ndim, len(scale_units))
+                )
 
             # check the scale units
             for i, scale_unit in enumerate(scale_units):
                 scale = obj.scales[i]
                 if scale.unit != scale_unit:
-                    raise Exception(("The scale for dimension %d of '%s' has the wrong unit. " +
-                                     "Expected '%s' but was '%s'.") % (i + 1, obj.name, scale_unit, scale.unit))
+                    raise Exception(
+                        (
+                            "The scale for dimension %d of '%s' has the wrong unit. "
+                            + "Expected '%s' but was '%s'."
+                        )
+                        % (i + 1, obj.name, scale_unit, scale.unit)
+                    )
 
     return obj
 
 
 def save(filename: str | PathLike, group: Group):
-    """ Save an SDF group to a file """
+    """Save an SDF group to a file"""
 
     hdf5.save(filename, group)
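For orientation, the public API touched here keeps its behavior after formatting: `load` checks the requested unit and scale units against the file's metadata, and `save` writes a `Group` tree via the `hdf5` backend. A minimal sketch of a caller exercising those checks (the file name, object path, and units below are invented for illustration):

    import sdf

    # raises if the dataset's unit is not "K", or if the scale of its
    # single dimension does not carry the unit "s"
    ds = sdf.load("results.sdf", objectname="/T", unit="K", scale_units=["s"])

    print(ds.data.shape)      # the numpy.float64 array held by the dataset
    print(ds.scales[0].unit)  # unit of the first dimension's scale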

src/sdf/dsres.py

Lines changed: 67 additions & 50 deletions
@@ -7,120 +7,118 @@
 
 # extract strings from the matrix
 def strMatNormal(a):
-    return [''.join(s).rstrip() for s in a]
+    return ["".join(s).rstrip() for s in a]
 
-def strMatTrans(a):
-    return [''.join(s).rstrip() for s in zip(*a)]
 
+def strMatTrans(a):
+    return ["".join(s).rstrip() for s in zip(*a)]
 
-def _split_description(comment: str) -> tuple[str | None, str | None, str | None, dict[str, str]]:
 
+def _split_description(
+    comment: str,
+) -> tuple[str | None, str | None, str | None, dict[str, str]]:
     unit = None
     display_unit = None
     info = dict()
 
-    if comment.endswith(']'):
-        i = comment.rfind('[')
-        unit = comment[i + 1:-1]
+    if comment.endswith("]"):
+        i = comment.rfind("[")
+        unit = comment[i + 1 : -1]
         comment = comment[0:i].strip()
 
     if unit is not None:
-
-        if ':#' in unit:
-            segments = unit.split(':#')
+        if ":#" in unit:
+            segments = unit.split(":#")
             unit = segments[0]
             for segment in segments[1:]:
-                key, value = segment[1:-1].split('=')
+                key, value = segment[1:-1].split("=")
                 info[key] = value
 
-        if '|' in unit:
-            unit, display_unit = unit.split('|')
+        if "|" in unit:
+            unit, display_unit = unit.split("|")
 
     return unit, display_unit, comment, info
 
 
 def load(filename: str | PathLike, objectname: str) -> Dataset | Group:
-
     g_root = _load_mat(filename)
 
-    if objectname == '/':
+    if objectname == "/":
         return g_root
     else:
         obj = g_root
-        segments = objectname.split('/')
+        segments = objectname.split("/")
         for s in segments:
             if s:
                 obj = obj[s]
         return obj
 
 
 def _load_mat(filename: str) -> Group:
-
     mat = scipy.io.loadmat(filename, chars_as_strings=False)
 
     _vars = {}
     _blocks = []
 
     try:
-        fileInfo = strMatNormal(mat['Aclass'])
+        fileInfo = strMatNormal(mat["Aclass"])
     except KeyError:
-        raise Exception('File structure not supported!')
+        raise Exception("File structure not supported!")
 
-    if fileInfo[1] == '1.1':
-        if fileInfo[3] == 'binTrans':
+    if fileInfo[1] == "1.1":
+        if fileInfo[3] == "binTrans":
             # usually files from OpenModelica or Dymola auto saved,
             # all methods rely on this structure since this was the only
             # one understand by earlier versions
-            names = strMatTrans(mat['name'])  # names
-            descr = strMatTrans(mat['description'])  # descriptions
+            names = strMatTrans(mat["name"])  # names
+            descr = strMatTrans(mat["description"])  # descriptions
 
-            cons = mat['data_1']
-            traj = mat['data_2']
+            cons = mat["data_1"]
+            traj = mat["data_2"]
 
-            d = mat['dataInfo'][0, :]
-            x = mat['dataInfo'][1, :]
+            d = mat["dataInfo"][0, :]
+            x = mat["dataInfo"][1, :]
 
-        elif fileInfo[3] == 'binNormal':
+        elif fileInfo[3] == "binNormal":
             # usually files from dymola, save as...,
             # variables are mapped to the structure above ('binTrans')
-            names = strMatNormal(mat['name'])  # names
-            descr = strMatNormal(mat['description'])  # descriptions
+            names = strMatNormal(mat["name"])  # names
+            descr = strMatNormal(mat["description"])  # descriptions
 
-            cons = mat['data_1'].T
-            traj = mat['data_2'].T
+            cons = mat["data_1"].T
+            traj = mat["data_2"].T
 
-            d = mat['dataInfo'][:, 0]
-            x = mat['dataInfo'][:, 1]
+            d = mat["dataInfo"][:, 0]
+            x = mat["dataInfo"][:, 1]
         else:
-            raise Exception('File structure not supported!')
+            raise Exception("File structure not supported!")
 
         c = np.abs(x) - 1  # column
         s = np.sign(x)  # sign
 
         vars = zip(names, descr, d, c, s)
-    elif fileInfo[1] == '1.0':
+    elif fileInfo[1] == "1.0":
         # files generated with dymola, save as..., only plotted ...
         # fake the structure of a 1.1 transposed file
-        names = strMatNormal(mat['names'])  # names
+        names = strMatNormal(mat["names"])  # names
         _blocks.append(0)
-        mat['data_0'] = mat['data'].transpose()
-        del mat['data']
-        _absc = (names[0], '')
+        mat["data_0"] = mat["data"].transpose()
+        del mat["data"]
+        _absc = (names[0], "")
         for i in range(1, len(names)):
-            _vars[names[i]] = ('', 0, i, 1)
+            _vars[names[i]] = ("", 0, i, 1)
     else:
-        raise Exception('File structure not supported!')
+        raise Exception("File structure not supported!")
 
     # build the SDF tree
-    g_root = Group('/')
+    g_root = Group("/")
 
     ds_time = None
 
     for name, desc, d, c, s in vars:
-
         unit, display_unit, comment, info = _split_description(desc)
 
-        path = name.split('.')
+        path = name.split(".")
 
         g_parent = g_root

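The description-string convention parsed by `_split_description` comes from Dymola/OpenModelica result files: a trailing bracket holds the unit, an optional display unit follows `|`, and `:#(key=value)` segments carry extra attributes. Two worked examples, traced through the code above (the strings themselves are invented):

    # unit and display unit
    _split_description("Rotor angle [rad|deg]")
    # -> ("rad", "deg", "Rotor angle", {})

    # a ':#' attribute segment is parsed into the info dict
    _split_description("Number of teeth [1:#(type=Integer)]")
    # -> ("1", None, "Number of teeth", {"type": "Integer"})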
@@ -138,17 +136,36 @@ def _load_mat(filename: str) -> Group:
         else:
             data = traj[c, :] * s
 
-        if 'type' in info:
-            if info['type'] == 'Integer' or 'Boolean':
+        if "type" in info:
+            if info["type"] == "Integer" or "Boolean":
                 data = np.asarray(data, dtype=np.int32)
 
         if d == 0:
-            ds = Dataset(path[-1], comment="Simulation time", unit=unit, display_unit=display_unit, data=data)
+            ds = Dataset(
+                path[-1],
+                comment="Simulation time",
+                unit=unit,
+                display_unit=display_unit,
+                data=data,
+            )
             ds_time = ds
         elif d == 1:
-            ds = Dataset(path[-1], comment=comment, unit=unit, display_unit=display_unit, data=data)
+            ds = Dataset(
+                path[-1],
+                comment=comment,
+                unit=unit,
+                display_unit=display_unit,
+                data=data,
+            )
         else:
-            ds = Dataset(path[-1], comment=comment, unit=unit, display_unit=display_unit, data=data, scales=[ds_time])
+            ds = Dataset(
+                path[-1],
+                comment=comment,
+                unit=unit,
+                display_unit=display_unit,
+                data=data,
+                scales=[ds_time],
+            )
 
         g_parent.datasets.append(ds)
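A pre-existing quirk the formatter preserves: `info["type"] == "Integer" or "Boolean"` is always truthy, because the non-empty string "Boolean" short-circuits the `or`, so the integer cast applies whenever a `type` key is present at all. The presumable intent, sketched here as a guess rather than as part of this commit:

    # hypothetical correction, not part of this commit
    if info.get("type") in ("Integer", "Boolean"):
        data = np.asarray(data, dtype=np.int32)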

src/sdf/examples/excel2sdf.py

Lines changed: 5 additions & 5 deletions
@@ -9,11 +9,11 @@
 
 
 # name of the Excel file to import
-filename = 'time_series.xlsx'
+filename = "time_series.xlsx"
 
 # open the workbook
 book = xlrd.open_workbook(filename)
-
+
 # get the first sheet
 sh = book.sheet_by_index(0)
 
@@ -33,14 +33,14 @@
 u = array(col_u)
 
 # create the datasets
-ds_t = sdf.Dataset(n_t, data=t, unit=u_t, is_scale=True, display_name='Time')
+ds_t = sdf.Dataset(n_t, data=t, unit=u_t, is_scale=True, display_name="Time")
 ds_u = sdf.Dataset(n_u, data=u, unit=u_u, scales=[ds_t])
 
 # create the root group
-g = sdf.Group('/', comment='Imported from ' + filename, datasets=[ds_t, ds_u])
+g = sdf.Group("/", comment="Imported from " + filename, datasets=[ds_t, ds_u])
 
 # change the file extension
-outfile = os.path.splitext(filename)[0] + '.sdf'
+outfile = os.path.splitext(filename)[0] + ".sdf"
 
 # write the SDF file
 sdf.save(outfile, g)
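To sanity-check the export, the file can be read back with the same package (a sketch; `time_series.sdf` is the output name the script above produces):

    import sdf

    # load the root group written by the example
    g = sdf.load("time_series.sdf")
    print(g.comment)  # "Imported from time_series.xlsx"
    for ds in g.datasets:
        print(ds.name, ds.unit, ds.data.shape)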
