|
94 | 94 | "metadata": {}, |
95 | 95 | "outputs": [], |
96 | 96 | "source": [ |
97 | | - "from brainles_aurora.inferer import AuroraGPUInferer, AuroraInferer, AuroraInfererConfig\n", |
| 97 | + "from brainles_aurora.inferer import AuroraInferer, AuroraInfererConfig\n", |
98 | 98 | "import nibabel as nib\n", |
99 | 99 | "import numpy as np\n", |
100 | 100 | "import utils # local file" |
|
157 | 157 | }, |
158 | 158 | { |
159 | 159 | "cell_type": "code", |
160 | | - "execution_count": 2, |
| 160 | + "execution_count": null, |
161 | 161 | "metadata": {}, |
162 | | - "outputs": [ |
163 | | - { |
164 | | - "name": "stderr", |
165 | | - "output_type": "stream", |
166 | | - "text": [ |
167 | | - "2024-02-06 15:54:36 INFO: Initialized AuroraGPUInferer with config: AuroraInfererConfig(log_level=20, tta=False, sliding_window_batch_size=4, workers=0, threshold=0.5, sliding_window_overlap=0.5, crop_size=(192, 192, 32), model_selection=<ModelSelection.BEST: 'best'>)\n", |
168 | | - "2024-02-06 15:54:36 INFO: Set torch device: cuda\n", |
169 | | - "2024-02-06 15:54:36 INFO: Infer with config: AuroraInfererConfig(log_level=20, tta=False, sliding_window_batch_size=4, workers=0, threshold=0.5, sliding_window_overlap=0.5, crop_size=(192, 192, 32), model_selection=<ModelSelection.BEST: 'best'>) and device: cuda\n", |
170 | | - "2024-02-06 15:54:36 INFO: Successfully validated input images. Input mode: NIFTI_FILEPATH\n", |
171 | | - "2024-02-06 15:54:36 INFO: Received files: T1: False, T1C: True, T2: False, FLAIR: False\n", |
172 | | - "2024-02-06 15:54:36 INFO: Inference mode: t1c-o\n", |
173 | | - "2024-02-06 15:54:36 INFO: No loaded compatible model found. Loading Model and weights\n" |
174 | | - ] |
175 | | - }, |
176 | | - { |
177 | | - "name": "stderr", |
178 | | - "output_type": "stream", |
179 | | - "text": [ |
180 | | - "2024-02-06 15:54:36 INFO: Setting up Dataloader\n", |
181 | | - "2024-02-06 15:54:36 INFO: Running inference on device := cuda\n" |
182 | | - ] |
183 | | - }, |
184 | | - { |
185 | | - "name": "stdout", |
186 | | - "output_type": "stream", |
187 | | - "text": [ |
188 | | - "BasicUNet features: (32, 32, 64, 128, 256, 32).\n" |
189 | | - ] |
190 | | - }, |
191 | | - { |
192 | | - "name": "stderr", |
193 | | - "output_type": "stream", |
194 | | - "text": [ |
195 | | - "2024-02-06 15:54:39 INFO: Post-processing data\n", |
196 | | - "2024-02-06 15:54:43 INFO: Saving post-processed data as NIFTI files\n", |
197 | | - "2024-02-06 15:54:43 INFO: Saved segmentation to output/t1c_segmentation.nii.gz\n", |
198 | | - "2024-02-06 15:54:43 INFO: Returning post-processed data as Dict of Numpy arrays\n", |
199 | | - "2024-02-06 15:54:43 INFO: Finished inference \n", |
200 | | - "\n" |
201 | | - ] |
202 | | - } |
203 | | - ], |
| 162 | + "outputs": [], |
| 163 | + "source": [ |
| 164 | + "# Instantiate the AuroraInferer:\n", |
| 165 | + "# will set recommended config defaults for optimal results and attempt to use a GPU if available\n", |
| 166 | + "inferer = AuroraInferer()\n", |
| 167 | + "\n", |
| 168 | + "# Perform the inference using a T1c MRI and save the segmentation to the specified file:\n", |
| 169 | + "_ = inferer.infer(\n", |
| 170 | + " t1c=\"data/t1c.nii.gz\",\n", |
| 171 | + " segmentation_file=\"output/t1c_segmentation.nii.gz\",\n", |
| 172 | + ")" |
| 173 | + ] |
| 174 | + }, |
| 175 | + { |
| 176 | + "cell_type": "markdown", |
| 177 | + "metadata": {}, |
| 178 | + "source": [ |
| 179 | + "### Fast Demo run" |
| 180 | + ] |
| 181 | + }, |
| 182 | + { |
| 183 | + "cell_type": "code", |
| 184 | + "execution_count": null, |
| 185 | + "metadata": {}, |
| 186 | + "outputs": [], |
204 | 187 | "source": [ |
205 | | - "# We first need to create an instance of the AuroraInfererConfig class, which will hold the configuration for the inferer. We can then create an instance of the AuroraInferer class, which will be used to perform the inference.\n", |
206 | 188 | "config = AuroraInfererConfig(\n", |
207 | | - " tta=False, # we disable test time augmentations for a quick demo, should be set to True for better results\n", |
208 | | - " sliding_window_batch_size=4, # The batch size used for the sliding window inference, decrease if you run out of memory (warning: too small batches might lead to unstable results)\n", |
| 189 | + "# We disable test-time augmentations for a quick demo,\n", |
| 190 | + " # should be set to True for better results\n", |
| 191 | + " tta=False, \n", |
| 192 | + " # The batch size used for the sliding window inference, decrease if you run out of memory \n", |
| 193 | + " # (warning: too small batches might lead to unstable results)\n", |
| 194 | + " sliding_window_batch_size=4, \n", |
209 | 195 | ")\n", |
210 | 196 | "\n", |
211 | | - "\n", |
212 | | - "# Now that we have the configuration we can create an instance of the AuroraInferer class. This class will be used to perform the inference. We can then call the infer method to perform the inference.\n", |
213 | | - "# If you don-t have a GPU that supports CUDA use the CPU version uncomment this and comment the GPU inferer\n", |
214 | | - "# inferer = AuroraInferer(config=config)\n", |
215 | | - "\n", |
216 | | - "inferer = AuroraGPUInferer(\n", |
| 197 | + "# Instantiate the AuroraInferer with the specified config:\n", |
| 198 | + "inferer = AuroraInferer(\n", |
217 | 199 | " config=config,\n", |
218 | | - " cuda_devices=\"0\", # optional, if you have multiple GPUs you can specify which one to use\n", |
219 | 200 | ")\n", |
220 | 201 | "\n", |
221 | | - "\n", |
222 | | - "# The infer method takes the path to the T1c MRI file and the path to the output segmentation file as arguments. The output segmentation file will be created by the infer method and will contain the segmentation of the input T1c MRI.\n", |
223 | | - "\n", |
224 | | - "# The example below shows how to perform the inference using a T1c MRI file:\n", |
| 202 | + "# Perform the inference using a T1c MRI and save the segmentation to the specified file:\n", |
225 | 203 | "_ = inferer.infer(\n", |
226 | 204 | " t1c=\"data/t1c.nii.gz\",\n", |
227 | 205 | " segmentation_file=\"output/t1c_segmentation.nii.gz\",\n", |
|
287 | 265 | }, |
288 | 266 | { |
289 | 267 | "cell_type": "code", |
290 | | - "execution_count": 4, |
| 268 | + "execution_count": null, |
291 | 269 | "metadata": {}, |
292 | | - "outputs": [ |
293 | | - { |
294 | | - "name": "stderr", |
295 | | - "output_type": "stream", |
296 | | - "text": [ |
297 | | - "2024-02-06 09:12:39 INFO: Initialized AuroraGPUInferer with config: AuroraInfererConfig(log_level=20, tta=True, sliding_window_batch_size=1, workers=0, threshold=0.5, sliding_window_overlap=0.5, crop_size=(192, 192, 32), model_selection=<ModelSelection.BEST: 'best'>)\n", |
298 | | - "2024-02-06 09:12:39 INFO: Set torch device: cuda\n", |
299 | | - "2024-02-06 09:12:39 INFO: Infer with config: AuroraInfererConfig(log_level=20, tta=True, sliding_window_batch_size=1, workers=0, threshold=0.5, sliding_window_overlap=0.5, crop_size=(192, 192, 32), model_selection=<ModelSelection.BEST: 'best'>) and device: cuda\n", |
300 | | - "2024-02-06 09:12:39 INFO: Successfully validated input images. Input mode: NIFTI_FILEPATH\n", |
301 | | - "2024-02-06 09:12:39 INFO: Received files: T1: True, T1C: True, T2: True, FLAIR: True\n", |
302 | | - "2024-02-06 09:12:39 INFO: Inference mode: t1-t1c-t2-fla\n", |
303 | | - "2024-02-06 09:12:39 INFO: No loaded compatible model found. Loading Model and weights\n", |
304 | | - "2024-02-06 09:12:39 INFO: Setting up Dataloader\n", |
305 | | - "2024-02-06 09:12:39 INFO: Running inference on device := cuda\n" |
306 | | - ] |
307 | | - }, |
308 | | - { |
309 | | - "name": "stdout", |
310 | | - "output_type": "stream", |
311 | | - "text": [ |
312 | | - "BasicUNet features: (32, 32, 64, 128, 256, 32).\n" |
313 | | - ] |
314 | | - }, |
315 | | - { |
316 | | - "name": "stderr", |
317 | | - "output_type": "stream", |
318 | | - "text": [ |
319 | | - "2024-02-06 09:12:46 INFO: Applying test time augmentations\n", |
320 | | - "2024-02-06 09:14:14 INFO: Post-processing data\n", |
321 | | - "2024-02-06 09:14:14 INFO: Saving post-processed data as NIFTI files\n", |
322 | | - "2024-02-06 09:14:14 INFO: Saved segmentation to output/multi-modal_segmentation.nii.gz\n", |
323 | | - "2024-02-06 09:14:14 INFO: Saved whole_network to output/whole_tumor_unbinarized_floats.nii.gz\n", |
324 | | - "2024-02-06 09:14:15 INFO: Saved metastasis_network to output/metastasis_unbinarized_floats.nii.gz\n", |
325 | | - "2024-02-06 09:14:15 INFO: Returning post-processed data as Dict of Numpy arrays\n", |
326 | | - "2024-02-06 09:14:15 INFO: Finished inference \n", |
327 | | - "\n" |
328 | | - ] |
329 | | - } |
330 | | - ], |
| 270 | + "outputs": [], |
331 | 271 | "source": [ |
332 | | - "config = AuroraInfererConfig() # Use default config\n", |
| 272 | + "# Instantiate the AuroraInferer\n", |
| 273 | + "inferer = AuroraInferer()\n", |
333 | 274 | "\n", |
334 | | - "# If you don-t have a GPU that supports CUDA use the CPU version: AuroraInferer(config=config)\n", |
335 | | - "inferer = AuroraGPUInferer(\n", |
336 | | - " config=config,\n", |
337 | | - ")\n", |
338 | | - "\n", |
339 | | - "# Use all four input modalities,we also create other outputs and a custom log file\n", |
| 275 | + "# Use all four input modalities; we also create other outputs and a custom log file\n", |
340 | 276 | "_ = inferer.infer(\n", |
341 | 277 | " t1=\"data/t1.nii.gz\",\n", |
342 | 278 | " t1c=\"data/t1c.nii.gz\",\n", |
|
360 | 296 | }, |
361 | 297 | { |
362 | 298 | "cell_type": "code", |
363 | | - "execution_count": 5, |
| 299 | + "execution_count": null, |
364 | 300 | "metadata": {}, |
365 | | - "outputs": [ |
366 | | - { |
367 | | - "name": "stderr", |
368 | | - "output_type": "stream", |
369 | | - "text": [ |
370 | | - "2024-02-06 09:16:17 INFO: Initialized AuroraGPUInferer with config: AuroraInfererConfig(log_level=20, tta=True, sliding_window_batch_size=1, workers=0, threshold=0.5, sliding_window_overlap=0.5, crop_size=(192, 192, 32), model_selection=<ModelSelection.BEST: 'best'>)\n", |
371 | | - "2024-02-06 09:16:17 INFO: Set torch device: cuda\n", |
372 | | - "2024-02-06 09:16:17 INFO: Infer with config: AuroraInfererConfig(log_level=20, tta=True, sliding_window_batch_size=1, workers=0, threshold=0.5, sliding_window_overlap=0.5, crop_size=(192, 192, 32), model_selection=<ModelSelection.BEST: 'best'>) and device: cuda\n", |
373 | | - "2024-02-06 09:16:17 INFO: Successfully validated input images. Input mode: NP_NDARRAY\n", |
374 | | - "2024-02-06 09:16:17 INFO: Received files: T1: True, T1C: False, T2: False, FLAIR: False\n", |
375 | | - "2024-02-06 09:16:17 INFO: Inference mode: t1-o\n", |
376 | | - "2024-02-06 09:16:17 INFO: No loaded compatible model found. Loading Model and weights\n", |
377 | | - "2024-02-06 09:16:17 INFO: Setting up Dataloader\n", |
378 | | - "2024-02-06 09:16:17 INFO: Running inference on device := cuda\n" |
379 | | - ] |
380 | | - }, |
381 | | - { |
382 | | - "name": "stdout", |
383 | | - "output_type": "stream", |
384 | | - "text": [ |
385 | | - "BasicUNet features: (32, 32, 64, 128, 256, 32).\n" |
386 | | - ] |
387 | | - }, |
388 | | - { |
389 | | - "name": "stderr", |
390 | | - "output_type": "stream", |
391 | | - "text": [ |
392 | | - "2024-02-06 09:16:23 INFO: Applying test time augmentations\n", |
393 | | - "2024-02-06 09:17:47 INFO: Post-processing data\n", |
394 | | - "2024-02-06 09:17:47 INFO: Returning post-processed data as Dict of Numpy arrays\n", |
395 | | - "2024-02-06 09:17:47 INFO: Finished inference \n", |
396 | | - "\n" |
397 | | - ] |
398 | | - }, |
399 | | - { |
400 | | - "name": "stdout", |
401 | | - "output_type": "stream", |
402 | | - "text": [ |
403 | | - "['segmentation : (240, 240, 155)', 'whole_network : (240, 240, 155)', 'metastasis_network : (240, 240, 155)']\n" |
404 | | - ] |
405 | | - } |
406 | | - ], |
| 301 | + "outputs": [], |
407 | 302 | "source": [ |
408 | | - "config = AuroraInfererConfig()\n", |
409 | | - "\n", |
410 | | - "\n", |
411 | | - "# AuroraInferer(config=config) # If you don-t have a GPU that supports CUDA use the CPU version (uncomment this and comment the GPU inferer)\n", |
412 | | - "inferer = AuroraGPUInferer(config=config)\n", |
| 303 | + "inferer = AuroraInferer()\n", |
413 | 304 | "\n", |
414 | 305 | "# We load the NIfTI data into a NumPy array\n", |
415 | 306 | "t1_np = nib.load(\"data/t1.nii.gz\").get_fdata()\n", |
|
0 commit comments