|
16 | 16 | }, |
17 | 17 | { |
18 | 18 | "cell_type": "code", |
19 | | - "execution_count": 1, |
| 19 | + "execution_count": 4, |
20 | 20 | "metadata": {}, |
21 | 21 | "outputs": [ |
22 | 22 | { |
|
129 | 129 | "source": [ |
130 | 130 | "## Data\n", |
131 | 131 | "\n", |
132 | | - "Aurora expects preprocessed input data as NIfTI file or NumPy Array. \n", |
| 132 | + "AURORA expects `preprocessed` input data as NIfTI file or NumPy Array, `preprocessed` meaning the files should be co-registerend, skullstripped and in SRI-24 space.\n", |
133 | 133 | "If your data is not preprocessed yet, consider using our [BrainLes preprocessing](https://github.com/BrainLesion/preprocessing) package (or its predecessor [BraTS-Toolkit](https://github.com/neuronflow/BraTS-Toolkit)).\n", |
134 | 134 | "\n", |
135 | 135 | "In this example we provide sample data from the [ASNR-MICCAI BraTS Brain Metastasis Challenge](https://www.synapse.org/#!Synapse:syn51156910/wiki/622553), which is already preprocessed." |
|
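Since the added line states that AURORA accepts either a NIfTI file path or a NumPy array, a minimal sketch of loading a preprocessed volume into an array may help readers who want the array route. This is not part of the notebook diff: nibabel is an assumption (any NIfTI reader works), and the path reuses the sample file referenced later in the notebook.

```python
# Sketch: load a preprocessed NIfTI volume into a NumPy array.
# Assumes nibabel is installed; the path matches the sample data used later in the notebook.
import nibabel as nib

t1c_img = nib.load("data/t1c.nii.gz")  # reads the NIfTI header and data
t1c_np = t1c_img.get_fdata()           # dense NumPy array suitable for AURORA's array input
print(t1c_np.shape, t1c_np.dtype)
```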
146 | 146 | "cell_type": "markdown", |
147 | 147 | "metadata": {}, |
148 | 148 | "source": [ |
149 | | - "### Minimal example using default settings and only T1 as input" |
| 149 | + "### Minimal example using default settings and only T1c as input" |
150 | 150 | ] |
151 | 151 | }, |
152 | 152 | { |
153 | 153 | "cell_type": "code", |
154 | | - "execution_count": 2, |
| 154 | + "execution_count": 5, |
155 | 155 | "metadata": {}, |
156 | | - "outputs": [ |
157 | | - { |
158 | | - "name": "stderr", |
159 | | - "output_type": "stream", |
160 | | - "text": [ |
161 | | - "2024-01-30 10:56:59 INFO: Initialized AuroraGPUInferer with config: AuroraInfererConfig(log_level=20, tta=True, sliding_window_batch_size=1, workers=0, threshold=0.5, sliding_window_overlap=0.5, crop_size=(192, 192, 32), model_selection=<ModelSelection.BEST: 'best'>)\n", |
162 | | - "2024-01-30 10:56:59 INFO: Set torch device: cuda\n", |
163 | | - "2024-01-30 10:56:59 INFO: Infer with config: AuroraInfererConfig(log_level=20, tta=True, sliding_window_batch_size=1, workers=0, threshold=0.5, sliding_window_overlap=0.5, crop_size=(192, 192, 32), model_selection=<ModelSelection.BEST: 'best'>) and device: cuda\n", |
164 | | - "2024-01-30 10:56:59 INFO: Successfully validated input images. Input mode: NIFTI_FILEPATH\n", |
165 | | - "2024-01-30 10:56:59 INFO: Received files: T1: True, T1C: False, T2: False, FLAIR: False\n", |
166 | | - "2024-01-30 10:56:59 INFO: Inference mode: t1-o\n", |
167 | | - "2024-01-30 10:56:59 INFO: No loaded compatible model found. Loading Model and weights\n", |
168 | | - "2024-01-30 10:56:59 INFO: Setting up Dataloader\n", |
169 | | - "2024-01-30 10:56:59 INFO: Running inference on device := cuda\n" |
170 | | - ] |
171 | | - }, |
172 | | - { |
173 | | - "name": "stdout", |
174 | | - "output_type": "stream", |
175 | | - "text": [ |
176 | | - "BasicUNet features: (32, 32, 64, 128, 256, 32).\n" |
177 | | - ] |
178 | | - }, |
179 | | - { |
180 | | - "name": "stderr", |
181 | | - "output_type": "stream", |
182 | | - "text": [ |
183 | | - "2024-01-30 10:57:05 INFO: Applying test time augmentations\n", |
184 | | - "2024-01-30 10:58:29 INFO: Post-processing data\n", |
185 | | - "2024-01-30 10:58:29 INFO: Saving post-processed data as NIFTI files\n", |
186 | | - "2024-01-30 10:58:29 INFO: Saved segmentation to output/segmentation.nii.gz\n", |
187 | | - "2024-01-30 10:58:29 INFO: Returning post-processed data as Dict of Numpy arrays\n", |
188 | | - "2024-01-30 10:58:29 INFO: Finished inference \n", |
189 | | - "\n" |
190 | | - ] |
191 | | - } |
192 | | - ], |
| 156 | + "outputs": [], |
193 | 157 | "source": [ |
194 | 158 | "from brainles_aurora.inferer import AuroraGPUInferer, AuroraInferer, AuroraInfererConfig\n", |
195 | 159 | "\n", |
196 | 160 | "config = AuroraInfererConfig(\n", |
197 | | - " tta=False\n", |
198 | | - ") # Disable test time augmentations for a quick demo, should be set to True for better results\n", |
199 | | - "\n", |
| 161 | + " tta=False # Disable test time augmentations for a quick demo, should be set to True for better results\n", |
| 162 | + ")\n", |
200 | 163 | "# If you don-t have a GPU that supports CUDA use the CPU version: AuroraInferer(config=config)\n", |
201 | | - "inferer = AuroraGPUInferer(config=config)\n", |
| 164 | + "inferer = AuroraGPUInferer(\n", |
| 165 | + " config=config,\n", |
| 166 | + " cuda_devices=\"0\", # optional, if you have multiple GPUs you can specify which one to use\n", |
| 167 | + ")\n", |
202 | 168 | "\n", |
203 | | - "_ = inferer.infer(\n", |
204 | | - " t1=\"data/t1n.nii.gz\",\n", |
| 169 | + "inferer.infer(\n", |
| 170 | + " t1c=\"data/t1c.nii.gz\",\n", |
205 | 171 | " segmentation_file=\"output/segmentation.nii.gz\",\n", |
206 | 172 | ")" |
207 | 173 | ] |
|
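For readers without a CUDA-capable GPU, the comment in the cell above points to `AuroraInferer(config=config)` as the CPU variant. A short sketch of that fallback, reusing the same config and `infer()` call shown in the diff (paths taken from the cell above), could look like this:

```python
# Sketch: CPU fallback mentioned in the comment above; same config and infer() call
# as the GPU example, just with the CPU inferer class.
from brainles_aurora.inferer import AuroraInferer, AuroraInfererConfig

config = AuroraInfererConfig(tta=False)  # keep TTA off for a quick demo
inferer = AuroraInferer(config=config)   # CPU inferer, as referenced in the comment above

inferer.infer(
    t1c="data/t1c.nii.gz",
    segmentation_file="output/segmentation.nii.gz",
)
```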
401 | 367 | "results = inferer.infer(t1=t1_np)\n", |
402 | 368 | "print([f\"{k} : {v.shape}\" for k, v in results.items()])" |
403 | 369 | ] |
| 370 | + }, |
| 371 | + { |
| 372 | + "cell_type": "markdown", |
| 373 | + "metadata": {}, |
| 374 | + "source": [] |
404 | 375 | } |
405 | 376 | ], |
406 | 377 | "metadata": { |
|
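The last hunk shows `infer()` called with a NumPy array and returning the post-processed data as a dict of NumPy arrays. A hedged sketch of writing those arrays back to NIfTI (not part of the AURORA API; nibabel is an assumption) might reuse the affine of the source volume so the outputs stay aligned, with the path taken from the earlier minimal example:

```python
# Sketch: persist the returned NumPy arrays as NIfTI files. Assumes the input array was
# loaded from a NIfTI file so its affine can be reused; nibabel is an assumption.
import nibabel as nib

t1_img = nib.load("data/t1n.nii.gz")            # source volume the array came from
results = inferer.infer(t1=t1_img.get_fdata())  # dict of NumPy arrays, as in the cell above

for name, array in results.items():             # keys depend on AURORA's output modes
    # cast to a NIfTI-friendly dtype (outputs may be boolean masks or probability maps)
    out_img = nib.Nifti1Image(array.astype("float32"), affine=t1_img.affine)
    nib.save(out_img, f"output/{name}.nii.gz")
```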