|
27 | 27 | }, |
28 | 28 | "outputs": [], |
29 | 29 | "source": [ |
30 | | - "#Installations \n", |
| 30 | + "# Installations\n", |
31 | 31 | "!pip install brainles_aurora matplotlib > /dev/null\n", |
32 | 32 | "\n", |
33 | 33 | "%load_ext autoreload\n", |
|
80 | 80 | "source": [ |
81 | 81 | "import sys\n", |
82 | 82 | "\n", |
83 | | - "#Check if we are in google colab currently\n", |
| 83 | + "# Check if we are in google colab currently\n", |
84 | 84 | "try:\n", |
85 | 85 | " import google.colab\n", |
| 86 | + "\n", |
86 | 87 | " colabFlag = True\n", |
87 | 88 | "except ImportError:\n", |
88 | 89 | " colabFlag = False\n", |
89 | 90 | "\n", |
90 | | - "#Execute certain steps only if we are in a colab environment\n", |
| 91 | + "# Execute certain steps only if we are in a colab environment\n", |
91 | 92 | "if colabFlag:\n", |
92 | 93 | " # Create a folder in your Google Drive\n", |
93 | 94 | " from google.colab import drive\n", |
94 | | - " drive.mount('/content/drive')\n", |
95 | | - " #clone repository and set path\n", |
| 95 | + "\n", |
| 96 | + " drive.mount(\"/content/drive\")\n", |
| 97 | + " # clone repository and set path\n", |
96 | 98 | " !git clone https://github.com/BrainLesion/tutorials.git /content/drive/MyDrive/tutorials\n", |
97 | 99 | " BASE_PATH = \"/content/drive/MyDrive/tutorials/AURORA/\"\n", |
98 | 100 | " sys.path.insert(0, BASE_PATH)\n", |
99 | | - " \n", |
100 | | - "else: # normal jupyter notebook environment\n", |
101 | | - " BASE_PATH = \"./\" #current working directory would be BraTs-Toolkit anyways if you are not in colab\n", |
102 | | - " " |
| 101 | + "\n", |
| 102 | + "else: # normal jupyter notebook environment\n", |
| 103 | + " BASE_PATH = \"./\" # when not in Colab, the current working directory is already the tutorial folder" |
103 | 104 | ] |
104 | 105 | }, |
105 | 106 | { |
|
128 | 129 | "import nibabel as nib\n", |
129 | 130 | "import numpy as np\n", |
130 | 131 | "import torch\n", |
131 | | - "import utils # local file\n" |
| 132 | + "import utils # local file" |
132 | 133 | ] |
133 | 134 | }, |
134 | 135 | { |
|
301 | 302 | } |
302 | 303 | ], |
303 | 304 | "source": [ |
304 | | - "# We first need to create an instance of the AuroraInfererConfig class, \n", |
305 | | - "# which will hold the configuration for the inferer. \n", |
| 305 | + "# We first need to create an instance of the AuroraInfererConfig class,\n", |
| 306 | + "# which will hold the configuration for the inferer.\n", |
306 | 307 | "# We can then create an instance of the AuroraInferer class, which will be used to perform the inference.\n", |
307 | 308 | "\n", |
308 | 309 | "config = AuroraInfererConfig(\n", |
309 | | - " tta=False, \n", |
| 310 | + " tta=False,\n", |
310 | 311 | " # we disable test time augmentations for a quick demo\n", |
311 | 312 | " # should be set to True for better results\n", |
312 | | - " sliding_window_batch_size=4, \n", |
| 313 | + " sliding_window_batch_size=4,\n", |
313 | 314 | " # The batch size used for the sliding window inference\n", |
314 | | - " # decrease if you run out of memory \n", |
| 315 | + " # decrease if you run out of memory\n", |
315 | 316 | " # warning: too small batches might lead to unstable results\n", |
316 | 317 | " cuda_devices=\"0\", # optional, if you have multiple GPUs you can specify which one to use\n", |
317 | | - " device = \"cpu\" #uncomment this line to force-use CPU\n", |
| 318 | + " # device=\"cpu\", # uncomment this line to force CPU usage\n", |
318 | 319 | ")\n", |
319 | 320 | "\n", |
320 | 321 | "\n", |
321 | 322 | "# Now that we have the configuration we can create an instance of the AuroraInferer class.\n", |
322 | 323 | "# This class will be used to perform the inference. We can then call the infer method to perform the inference.\n", |
323 | | - "inferer = AuroraInferer( config=config )\n", |
| 324 | + "inferer = AuroraInferer(config=config)\n", |
324 | 325 | "\n", |
325 | | - "if(torch.cuda.is_available() == False and colabFlag==True):\n", |
326 | | - " raise RuntimeWarning(\"You are not using any GPU in Colab! Go to 'Runtime'->'Change Runtime type' to select GPU usage!\")\n", |
| 326 | + "if not torch.cuda.is_available() and colabFlag:\n", |
| 327 | + " raise RuntimeWarning(\n", |
| 328 | + " \"You are not using any GPU in Colab! Go to 'Runtime'->'Change Runtime type' to select GPU usage!\"\n", |
| 329 | + " )\n", |
327 | 330 | "\n", |
328 | | - "# The infer method takes the path to the T1c MRI file and the path to the output segmentation file as arguments. \n", |
329 | | - "# The output segmentation file will be created by the infer method and \n", |
| 331 | + "# The infer method takes the path to the T1c MRI file and the path to the output segmentation file as arguments.\n", |
| 332 | + "# The output segmentation file will be created by the infer method and\n", |
330 | 333 | "# will contain the segmentation of the input T1c MRI.\n", |
331 | 334 | "\n", |
332 | 335 | "# The example below shows how to perform the inference using a T1c MRI file:\n", |
|
336 | 339 | ")\n", |
337 | 340 | "\n", |
338 | 341 | "# IMPORTANT: If this cell produces an OutOfMemoryError, you might not have enough VRAM (minimum 8GB).\n", |
339 | | - "# Try using the CPU instead by setting \"useGPU\" to False above\n" |
| 342 | + "# Try using the CPU instead by uncommenting the device=\"cpu\" line in the config above" |
340 | 343 | ] |
341 | 344 | }, |
342 | 345 | { |
|
534 | 537 | "source": [ |
535 | 538 | "config = AuroraInfererConfig() # Use default config\n", |
536 | 539 | "\n", |
537 | | - "inferer = AuroraInferer( config=config )\n", |
538 | | - " \n", |
| 540 | + "inferer = AuroraInferer(config=config)\n", |
| 541 | + "\n", |
539 | 542 | "# Use all four input modalities; we also create other outputs and a custom log file\n", |
540 | 543 | "_ = inferer.infer(\n", |
541 | | - " t1=f\"{BASE_PATH}/data/t1.nii.gz\", \n", |
| 544 | + " t1=f\"{BASE_PATH}/data/t1.nii.gz\",\n", |
542 | 545 | " t1c=f\"{BASE_PATH}/data/t1c.nii.gz\",\n", |
543 | | - " t2=f\"{BASE_PATH}/data/t2.nii.gz\", \n", |
544 | | - " fla=f\"{BASE_PATH}/data/flair.nii.gz\", \n", |
| 546 | + " t2=f\"{BASE_PATH}/data/t2.nii.gz\",\n", |
| 547 | + " fla=f\"{BASE_PATH}/data/flair.nii.gz\",\n", |
545 | 548 | " segmentation_file=f\"{BASE_PATH}/output/multi-modal_segmentation.nii.gz\",\n", |
546 | 549 | " # The unbinarized network outputs for the whole tumor channel (edema + enhancing tumor core + necrosis)\n", |
547 | 550 | " whole_tumor_unbinarized_floats_file=f\"{BASE_PATH}/output/whole_tumor_unbinarized_floats.nii.gz\",\n", |
|
692 | 695 | "source": [ |
693 | 696 | "config = AuroraInfererConfig()\n", |
694 | 697 | "\n", |
695 | | - "# AuroraInferer(config=config) \n", |
| 698 | + "# AuroraInferer(config=config)\n", |
696 | 699 | "# If you do not have a GPU that supports CUDA, use the CPU version (uncomment the line above and comment out the GPU inferer)\n", |
697 | 700 | "inferer = AuroraInferer(config=config)\n", |
698 | 701 | "\n", |
|