Skip to content

Commit 9375467

Browse files
committed
Update docstrings.
1 parent 4a10923 commit 9375467

5 files changed

Lines changed: 239 additions & 96 deletions

File tree

src/modelarrayio/cifti.py

Lines changed: 85 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -76,28 +76,22 @@ def _build_scalar_sources(long_df):
7676

7777

7878
def extract_cifti_scalar_data(cifti_file, reference_brain_names=None):
79-
"""
80-
Load a scalar cifti file and get its data and mapping
81-
82-
Parameters:
83-
-----------
79+
"""Load a scalar cifti file and get its data and mapping
8480
85-
cifti_file: pathlike
81+
Parameters
82+
----------
83+
cifti_file : :obj:`str`
8684
CIFTI2 file on disk
87-
88-
reference_brain_names: np.ndarray
85+
reference_brain_names : :obj:`numpy.ndarray`
8986
Array of vertex names
90-
Returns:
91-
--------
9287
93-
cifti_scalar_data: np.ndarray
88+
Returns
89+
-------
90+
cifti_scalar_data : :obj:`numpy.ndarray`
9491
The scalar data from the cifti file
95-
96-
brain_structures: np.ndarray
92+
brain_structures : :obj:`numpy.ndarray`
9793
The per-greyordinate brain structures as strings
98-
9994
"""
100-
10195
cifti = cifti_file if hasattr(cifti_file, 'get_fdata') else nb.load(cifti_file)
10296
cifti_hdr = cifti.header
10397
axes = [cifti_hdr.get_axis(i) for i in range(cifti.ndim)]
@@ -128,13 +122,22 @@ def extract_cifti_scalar_data(cifti_file, reference_brain_names=None):
128122

129123
return cifti_data, brain_names
130124

131-
# vertex_table = pd.DataFrame(
132-
# dict(
133-
# vertex_id=np.arange(cifti_data.shape[0]),
134-
# structure_name=brain_names)
135-
136125

137126
def brain_names_to_dataframe(brain_names):
127+
"""Convert brain names to a dataframe.
128+
129+
Parameters
130+
----------
131+
brain_names : :obj:`numpy.ndarray`
132+
Array of brain names
133+
134+
Returns
135+
-------
136+
greyordinate_df : :obj:`pandas.DataFrame`
137+
DataFrame with vertex_id and structure_id
138+
structure_name_strings : :obj:`list`
139+
List of structure names
140+
"""
138141
# Make a lookup table for greyordinates
139142
structure_ids, structure_names = pd.factorize(brain_names)
140143
# Make them a list of strings
@@ -156,11 +159,20 @@ def _load_cohort_cifti(cohort_long, relative_root, s3_workers):
156159
Threads share memory so reference_brain_names is accessed directly with
157160
no copying overhead.
158161
162+
Parameters
163+
----------
164+
cohort_long : :obj:`pandas.DataFrame`
165+
Long-format cohort dataframe
166+
relative_root : :obj:`str`
167+
Root to which all paths are relative
168+
s3_workers : :obj:`int`
169+
Number of workers to use for parallel loading
170+
159171
Returns
160172
-------
161-
scalars : dict[str, list[np.ndarray]]
173+
scalars : :obj:`dict`
162174
Per-scalar ordered list of 1-D subject arrays, ready for stripe-write.
163-
reference_brain_names : np.ndarray
175+
reference_brain_names : :obj:`numpy.ndarray`
164176
Brain structure names from the first file, for building greyordinate table.
165177
"""
166178
# Assign stable per-scalar subject indices in cohort order
@@ -238,22 +250,54 @@ def write_storage(
238250
scalar_columns=None,
239251
s3_workers=1,
240252
):
241-
"""
242-
Load all fixeldb data.
253+
"""Load all CIFTI data and write to an HDF5 file with configurable storage.
254+
243255
Parameters
244-
-----------
245-
index_file: str
246-
path to a Nifti2 index file
247-
directions_file: str
248-
path to a Nifti2 directions file
249-
cohort_file: str
250-
path to a csv with demographic info and paths to data
251-
output_h5: str
252-
path to a new .h5 file to be written
253-
relative_root: str
254-
path to which index_file, directions_file and cohort_file (and its contents) are relative
255-
"""
256+
----------
257+
cohort_file : :obj:`str`
258+
Path to a csv with demographic info and paths to data
259+
backend : :obj:`str`
260+
Backend to use for storage
261+
output_h5 : :obj:`str`
262+
Path to a new .h5 file to be written
263+
output_tdb : :obj:`str`
264+
Path to a new .tdb file to be written
265+
relative_root : :obj:`str`
266+
Root to which all paths are relative
267+
storage_dtype : :obj:`str`
268+
Floating type to store values
269+
compression : :obj:`str`
270+
HDF5 compression filter
271+
compression_level : :obj:`int`
272+
Gzip compression level (0-9)
273+
shuffle : :obj:`bool`
274+
Enable HDF5 shuffle filter
275+
chunk_voxels : :obj:`int`
276+
Chunk size along the voxel axis
277+
target_chunk_mb : :obj:`float`
278+
Target chunk size in MiB when auto-computing chunk_voxels
279+
tdb_compression : :obj:`str`
280+
TileDB compression filter
281+
tdb_compression_level : :obj:`int`
282+
TileDB compression level
283+
tdb_shuffle : :obj:`bool`
284+
Enable TileDB shuffle filter
285+
tdb_tile_voxels : :obj:`int`
286+
Tile size along the voxel axis
287+
tdb_target_tile_mb : :obj:`float`
288+
Target tile size in MiB when auto-computing tdb_tile_voxels
289+
tdb_workers : :obj:`int`
290+
Number of workers to use for parallel loading
291+
scalar_columns : :obj:`list`
292+
List of scalar columns to use
293+
s3_workers : :obj:`int`
294+
Number of workers to use for parallel loading
256295
296+
Returns
297+
-------
298+
status : :obj:`int`
299+
Status of the operation. 0 if successful, 1 if failed.
300+
"""
257301
cohort_path = op.join(relative_root, cohort_file)
258302
cohort_df = pd.read_csv(cohort_path)
259303
cohort_long = _cohort_to_long_dataframe(cohort_df, scalar_columns=scalar_columns)
@@ -413,7 +457,8 @@ def main():
413457

414458

415459
def _h5_to_ciftis(example_cifti, h5_file, analysis_name, cifti_output_dir):
416-
"""Writes the contents of an hdf5 file to a fixels directory.
460+
"""Write the contents of an hdf5 file to a fixels directory.
461+
417462
The ``h5_file`` parameter should point to an HDF5 file that contains at least two
418463
datasets. There must be one called ``results/results_matrix``, that contains a
419464
matrix of cifti results. Each column contains a single result and each row is a
@@ -425,6 +470,7 @@ def _h5_to_ciftis(example_cifti, h5_file, analysis_name, cifti_output_dir):
425470
Then each column in ``results/results_matrix`` is extracted to fill the data of a
426471
new Nifti2 file that gets converted to mif and named according to the corresponding
427472
item in ``results/has_names``.
473+
428474
Parameters
429475
----------
430476
example_cifti: pathlike
@@ -435,6 +481,7 @@ def _h5_to_ciftis(example_cifti, h5_file, analysis_name, cifti_output_dir):
435481
the name for the analysis results to be saved
436482
cifti_output_dir: str
437483
abspath to where the output cifti files will go.
484+
438485
Outputs
439486
=======
440487
None
@@ -488,6 +535,7 @@ def _h5_to_ciftis(example_cifti, h5_file, analysis_name, cifti_output_dir):
488535

489536

490537
def h5_to_ciftis():
538+
"""Write the contents of an hdf5 file to a cifti directory."""
491539
parser = get_h5_to_ciftis_parser()
492540
args = parser.parse_args()
493541

0 commit comments

Comments
 (0)