Skip to content

Commit d8d75cc

Browse files
committed
Address style issues.
1 parent d7e4483 commit d8d75cc

10 files changed

Lines changed: 42 additions & 24 deletions

File tree

docs/conf.py

Lines changed: 2 additions & 2 deletions
@@ -1,10 +1,10 @@
 # Configuration file for the Sphinx documentation builder.
 # https://www.sphinx-doc.org/en/master/usage/configuration.html
 
-from datetime import datetime
+from datetime import UTC, datetime
 
 project = 'ModelArrayIO'
-copyright = f'2017-{datetime.today().strftime("%Y")}, PennLINC developers'
+copyright = f'2017-{datetime.now(tz=UTC).strftime("%Y")}, PennLINC developers'
 author = 'PennLINC developers'
 
 extensions = [

src/modelarrayio/cifti.py

Lines changed: 5 additions & 3 deletions
@@ -28,6 +28,8 @@
 from .tiledb_storage import write_column_names as tdb_write_column_names
 from .tiledb_storage import write_rows_in_column_stripes as tdb_write_stripes
 
+logger = logging.getLogger(__name__)
+
 
 def _cohort_to_long_dataframe(cohort_df, scalar_columns=None):
     scalar_columns = [col for col in (scalar_columns or []) if col]
@@ -448,9 +450,9 @@ def _h5_to_ciftis(example_cifti, h5_file, analysis_name, cifti_output_dir):
 
     try:
         results_names = names_data.tolist()
-    except Exception:
+    except (AttributeError, OSError, TypeError, ValueError):
         print("Unable to read column names, using 'componentNNN' instead")
-        results_names = ['component%03d' % (n + 1) for n in range(results_matrix.shape[0])]
+        results_names = [f'component{n + 1:03d}' for n in range(results_matrix.shape[0])]
 
     # Make output directory if it does not exist
     if not op.isdir(cifti_output_dir):
@@ -497,7 +499,7 @@ def h5_to_ciftis():
 
     # Get an example cifti
     if args.example_cifti is None:
-        logging.warning(
+        logger.warning(
             'No example cifti file provided, using the first cifti file from the cohort file'
         )
     cohort_df = pd.read_csv(args.cohort_file)

src/modelarrayio/fixels.py

Lines changed: 2 additions & 2 deletions
@@ -455,9 +455,9 @@ def h5_to_mifs(example_mif, h5_file, analysis_name, fixel_output_dir):
 
     try:
         results_names = names_data.tolist()
-    except Exception:
+    except (AttributeError, OSError, TypeError, ValueError):
         print("Unable to read column names, using 'componentNNN' instead")
-        results_names = ['component%03d' % (n + 1) for n in range(results_matrix.shape[0])]
+        results_names = [f'component{n + 1:03d}' for n in range(results_matrix.shape[0])]
 
     # Make output directory if it does not exist
     if not op.isdir(fixel_output_dir):

src/modelarrayio/h5_storage.py

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ def resolve_compression(compression, compression_level, shuffle):
     if comp == 'gzip':
         try:
             gzip_level = int(compression_level)
-        except Exception:
+        except (TypeError, ValueError):
             gzip_level = 4
         gzip_level = max(0, min(9, gzip_level))
     return comp, gzip_level, use_shuffle

src/modelarrayio/s3_utils.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ def _make_s3_client():
     except ImportError:
         raise ImportError(
             'boto3 is required for s3:// paths. Install with: pip install modelarrayio[s3]'
-        )
+        ) from None
     anon = os.environ.get('MODELARRAYIO_S3_ANON', '').lower() in ('1', 'true', 'yes')
     if anon:
         from botocore import UNSIGNED

src/modelarrayio/tiledb_storage.py

Lines changed: 4 additions & 4 deletions
@@ -29,7 +29,7 @@ def _build_filter_list(compression: str | None, compression_level: int | None, s
     level = None
     try:
         level = int(compression_level) if compression_level is not None else None
-    except Exception:
+    except (TypeError, ValueError):
         level = None
     if comp == 'zstd':
         filters.append(tiledb.ZstdFilter(level=level if level is not None else 5))
@@ -129,7 +129,7 @@ def create_scalar_matrix_array(
         if sources_list is not None:
             try:
                 A.meta['column_names'] = json.dumps(list(sources_list))
-            except Exception:
+            except (TypeError, ValueError, tiledb.TileDBError):
                 # Fallback without metadata if serialization fails
                 logger.warning('Failed to write column_names metadata for %s', uri)
     logger.info('Finished writing array %s', uri)
@@ -182,7 +182,7 @@ def create_empty_scalar_matrix_array(
     try:
         with tiledb.open(uri, 'w') as A:
             A.meta['column_names'] = json.dumps(list(map(str, sources_list)))
-    except Exception:
+    except (TypeError, ValueError, tiledb.TileDBError):
         logger.warning('Failed to write column_names metadata for %s', uri)
     return uri
 
@@ -258,5 +258,5 @@ def write_column_names(base_uri: str, scalar: str, sources: Sequence[str]):
     try:
         with tiledb.Group(group_uri, 'w') as G:
             G.meta['column_names'] = json.dumps(sources)
-    except Exception:
+    except (TypeError, ValueError, tiledb.TileDBError):
         logger.warning('Failed to write column_names metadata for group %s', group_uri)

src/modelarrayio/voxels.py

Lines changed: 8 additions & 4 deletions
@@ -1,4 +1,5 @@
 import argparse
+import logging
 import os
 import os.path as op
 from collections import defaultdict
@@ -25,6 +26,8 @@
 from .tiledb_storage import create_empty_scalar_matrix_array as tdb_create_empty
 from .tiledb_storage import write_rows_in_column_stripes as tdb_write_stripes
 
+logger = logging.getLogger(__name__)
+
 
 def _load_cohort_voxels(cohort_df, group_mask_matrix, relative_root, s3_workers):
     """Load all voxel rows from the cohort, optionally in parallel.
@@ -137,7 +140,7 @@ def _decode_names(arr):
                 s = s.rstrip('\x00').strip()
                 out.append(s)
             return out
-        except Exception:
+        except (AttributeError, OSError, TypeError, ValueError):
            return None
 
    results_names = None
@@ -146,7 +149,7 @@ def _decode_names(arr):
        names_attr = results_matrix.attrs.get('colnames', None)
        if names_attr is not None:
            results_names = _decode_names(names_attr)
-    except Exception:
+    except (OSError, RuntimeError, TypeError, ValueError):
        results_names = None
 
    # 2) Fallback to dataset-based column names (new format)
@@ -162,13 +165,14 @@ def _decode_names(arr):
                results_names = _decode_names(names_ds)
                if results_names:
                    break
-            except Exception:
+            except (KeyError, OSError, RuntimeError, TypeError, ValueError):
+                logger.debug('Could not read column names from %s', p, exc_info=True)
                continue
 
    # 3) Final fallback to generated names
    if not results_names:
        print("Unable to read column names, using 'componentNNN' instead")
-        results_names = ['component%03d' % (n + 1) for n in range(results_matrix.shape[0])]
+        results_names = [f'component{n + 1:03d}' for n in range(results_matrix.shape[0])]
 
    # # Make output directory if it does not exist # has been done in h5_to_volumes_wrapper()
    # if op.isdir(volume_output_dir) == False:

test/test_cifti_cli.py

Lines changed: 6 additions & 2 deletions
@@ -76,7 +76,9 @@ def test_concifti_cli_creates_expected_hdf5(tmp_path):
         '1.0',
     ]
     env = os.environ.copy()
-    proc = subprocess.run(cmd, cwd=str(tmp_path), env=env, capture_output=True, text=True)
+    proc = subprocess.run(
+        cmd, cwd=str(tmp_path), env=env, capture_output=True, text=True, check=False
+    )
     assert proc.returncode == 0, f'concifti failed: {proc.stdout}\n{proc.stderr}'
     assert op.exists(out_h5)
 
@@ -103,7 +105,9 @@ def test_concifti_cli_creates_expected_hdf5(tmp_path):
     # Column names exist and match subjects count
     grp = h5['scalars/THICK']
     assert 'column_names' in grp
-    colnames = [x.decode('utf-8') if isinstance(x, bytes) else str(x) for x in grp['column_names'][...]]
+    colnames = [
+        x.decode('utf-8') if isinstance(x, bytes) else str(x) for x in grp['column_names'][...]
+    ]
     assert len(colnames) == 2
 
     # Spot-check a couple values

test/test_voxels_cli.py

Lines changed: 7 additions & 3 deletions
@@ -61,7 +61,7 @@ def test_convoxel_cli_creates_expected_hdf5(tmp_path):
     with cohort_csv.open('w', newline='') as f:
         writer = csv.DictWriter(f, fieldnames=['scalar_name', 'source_file', 'source_mask_file'])
         writer.writeheader()
-        for sidx, (scalar_name, mask_name) in enumerate(subjects):
+        for _sidx, (scalar_name, mask_name) in enumerate(subjects):
             writer.writerow(
                 {
                     'scalar_name': 'FA',
@@ -98,7 +98,9 @@ def test_convoxel_cli_creates_expected_hdf5(tmp_path):
         '1.0',
     ]
     env = os.environ.copy()
-    proc = subprocess.run(cmd, cwd=str(tmp_path), env=env, capture_output=True, text=True)
+    proc = subprocess.run(
+        cmd, cwd=str(tmp_path), env=env, capture_output=True, text=True, check=False
+    )
     assert proc.returncode == 0, f'convoxel failed: {proc.stdout}\n{proc.stderr}'
     assert op.exists(out_h5)
 
@@ -122,7 +124,9 @@ def test_convoxel_cli_creates_expected_hdf5(tmp_path):
     # Column names exist and match subjects count
     grp = h5['scalars/FA']
     assert 'column_names' in grp
-    colnames = [x.decode('utf-8') if isinstance(x, bytes) else str(x) for x in grp['column_names'][...]]
+    colnames = [
+        x.decode('utf-8') if isinstance(x, bytes) else str(x) for x in grp['column_names'][...]
+    ]
     assert len(colnames) == 2
 
     # Spot-check a voxel mapping (pick the third voxel)

test/test_voxels_s3.py

Lines changed: 6 additions & 2 deletions
@@ -43,14 +43,15 @@ def group_mask_path(tmp_path_factory):
     boto3 = pytest.importorskip('boto3')
     from botocore import UNSIGNED
     from botocore.config import Config
+    from botocore.exceptions import BotoCoreError
 
     tmp = tmp_path_factory.mktemp('s3_group_mask')
     dest = tmp / 'group_mask.nii.gz'
     s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
     key = f'{_PREFIX}/func_mask/{OHSU_SUBJECTS[0]}_func_mask.nii.gz'
     try:
         s3.download_file(_BUCKET, key, str(dest))
-    except Exception as exc:
+    except (OSError, BotoCoreError) as exc:
         pytest.skip(f'S3 download unavailable: {exc}')
     return dest
 
@@ -102,7 +103,9 @@ def test_convoxel_s3_parallel(tmp_path, group_mask_path):
         '4',
     ]
     env = {**os.environ, 'MODELARRAYIO_S3_ANON': '1'}
-    proc = subprocess.run(cmd, cwd=str(tmp_path), capture_output=True, text=True, env=env)
+    proc = subprocess.run(
+        cmd, cwd=str(tmp_path), capture_output=True, text=True, env=env, check=False
+    )
     assert proc.returncode == 0, f'convoxel failed:\n{proc.stdout}\n{proc.stderr}'
     assert out_h5.exists()
 
@@ -169,6 +172,7 @@ def test_convoxel_s3_serial_matches_parallel(tmp_path, group_mask_path):
         capture_output=True,
         text=True,
         env=env,
+        check=False,
     )
     assert proc.returncode == 0, f'convoxel failed (workers={workers}):\n{proc.stderr}'
 

0 commit comments

Comments
 (0)