|
31 | 31 | "outputs": [], |
32 | 32 | "source": [ |
33 | 33 | "# Installations\n", |
34 | | - "!pip install brats matplotlib > /dev/null\n", |
| 34 | + "!pip install brats matplotlib ipywidgets > /dev/null\n", |
35 | 35 | "\n", |
36 | 36 | "%load_ext autoreload\n", |
37 | 37 | "%autoreload 2" |
|
47 | 47 | "Otherwise you can follow and execute the tutorial on your browser.\n", |
48 | 48 | "In order to start working on the notebook, click on the following button, this will open this page in the Colab environment and you will be able to execute the code on your own (*Google account required*).\n", |
49 | 49 | "\n", |
50 | | - "<a target=\"_blank\" href=\"https://colab.research.google.com/github/BrainLesion/tutorials/blob/main/AURORA/tutorial.ipynb\">\n", |
| 50 | + "<a target=\"_blank\" href=\"https://colab.research.google.com/github/BrainLesion/tutorials/blob/main/BraTS/tutorial.ipynb\">\n", |
51 | 51 | " <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n", |
52 | 52 | "</a>\n", |
53 | 53 | "\n", |
|
105 | 105 | "cell_type": "markdown", |
106 | 106 | "metadata": {}, |
107 | 107 | "source": [ |
108 | | - "### Imports" |
| 108 | + "## Imports" |
109 | 109 | ] |
110 | 110 | }, |
111 | 111 | { |
|
114 | 114 | "metadata": {}, |
115 | 115 | "outputs": [], |
116 | 116 | "source": [ |
| 117 | + "from pathlib import Path\n", |
117 | 118 | "from brats import AdultGliomaSegmenter\n", |
118 | 119 | "from brats.constants import AdultGliomaAlgorithms\n", |
119 | 120 | "import utils # local file" |
|
146 | 147 | "metadata": {}, |
147 | 148 | "outputs": [], |
148 | 149 | "source": [ |
149 | | - "utils.visualize_data(f\"{BASE_PATH}/data\")" |
| 150 | + "subject = \"BraTS-GLI-00001-000\"\n", |
| 151 | + "data_path = Path(BASE_PATH) / \"data\"\n", |
| 152 | + "subject_path = data_path / subject\n", |
| 153 | + "utils.visualize_data(data_path, subject_id=subject)" |
150 | 154 | ] |
151 | 155 | }, |
152 | 156 | { |
153 | 157 | "cell_type": "markdown", |
154 | 158 | "metadata": {}, |
155 | 159 | "source": [ |
156 | | - "## Using AURORA" |
| 160 | + "## Using BraTS" |
157 | 161 | ] |
158 | 162 | }, |
159 | 163 | { |
160 | 164 | "cell_type": "markdown", |
161 | 165 | "metadata": {}, |
162 | 166 | "source": [ |
163 | | - "### Minimal example using default settings and only T1c as input" |
| 167 | + "### Minimal example using default settings" |
164 | 168 | ] |
165 | 169 | }, |
166 | 170 | { |
|
169 | 173 | "metadata": {}, |
170 | 174 | "outputs": [], |
171 | 175 | "source": [ |
172 | | - "# We first need to create an instance of the AuroraInfererConfig class,\n", |
173 | | - "# which will hold the configuration for the inferer.\n", |
174 | | - "# We can then create an instance of the AuroraInferer class, which will be used to perform the inference.\n", |
175 | | - "\n", |
176 | | - "config = AuroraInfererConfig(\n", |
177 | | - " tta=False,\n", |
178 | | - " # we disable test time augmentations for a quick demo\n", |
179 | | - " # should be set to True for better results\n", |
180 | | - " sliding_window_batch_size=4,\n", |
181 | | - " # The batch size used for the sliding window inference\n", |
182 | | - " # decrease if you run out of memory\n", |
183 | | - " # warning: too small batches might lead to unstable results\n", |
184 | | - " cuda_devices=\"0\", # optional, if you have multiple GPUs you can specify which one to use\n", |
185 | | - " device=\"cpu\", # uncomment this line to force-use CPU\n", |
186 | | - ")\n", |
187 | | - "\n", |
188 | | - "\n", |
189 | | - "# Now that we have the configuration we can create an instance of the AuroraInferer class.\n", |
190 | | - "# This class will be used to perform the inference. We can then call the infer method to perform the inference.\n", |
191 | | - "inferer = AuroraInferer(config=config)\n", |
192 | | - "\n", |
193 | | - "if torch.cuda.is_available() == False and colabFlag == True:\n", |
194 | | - " raise RuntimeWarning(\n", |
195 | | - " \"You are not using any GPU in Colab! Go to 'Runtime'->'Change Runtime type' to select GPU usage!\"\n", |
196 | | - " )\n", |
197 | | - "\n", |
198 | | - "# The infer method takes the path to the T1c MRI file and the path to the output segmentation file as arguments.\n", |
199 | | - "# The output segmentation file will be created by the infer method and\n", |
200 | | - "# will contain the segmentation of the input T1c MRI.\n", |
201 | | - "\n", |
202 | | - "# The example below shows how to perform the inference using a T1c MRI file:\n", |
203 | | - "_ = inferer.infer(\n", |
204 | | - " t1c=f\"{BASE_PATH}/data/t1c.nii.gz\",\n", |
205 | | - " segmentation_file=f\"{BASE_PATH}/output/t1c_segmentation.nii.gz\",\n", |
206 | | - ")\n", |
207 | | - "\n", |
208 | | - "# IMPORTANT: If this cell produces an OutOfMemoryError, you might not have enough VRAM (minimum 8GB).\n", |
209 | | - "# Try using the CPU instead by setting \"useGPU\" to False above" |
| 176 | + "segmenter = AdultGliomaSegmenter()\n", |
| 177 | + "segmenter.infer_single(\n", |
| 178 | + " t1c=subject_path / f\"{subject}-t1c.nii.gz\",\n", |
| 179 | + " t1n=subject_path / f\"{subject}-t1n.nii.gz\",\n", |
| 180 | + " t2f=subject_path / f\"{subject}-t2f.nii.gz\",\n", |
| 181 | + " t2w=subject_path / f\"{subject}-t2w.nii.gz\",\n", |
| 182 | + " output_file=\"segmentation.nii.gz\",\n", |
| 183 | + ")" |
210 | 184 | ] |
211 | 185 | }, |
212 | 186 | { |
213 | 187 | "cell_type": "markdown", |
214 | 188 | "metadata": {}, |
215 | 189 | "source": [ |
216 | | - "## Visualize results\n", |
| 190 | + "### Visualize results\n", |
217 | 191 | "\n", |
218 | 192 | "The segementation comprise of the\n", |
219 | | - "- **metastasis label** (in blue), consiting of contrast-enhancing metastasis and necrosis\n", |
220 | | - "- T2-FLAIR hyperintense **edema label** (in red) \n" |
| 193 | + "TODO\n" |
221 | 194 | ] |
222 | 195 | }, |
223 | 196 | { |
|
227 | 200 | "outputs": [], |
228 | 201 | "source": [ |
229 | 202 | "utils.visualize_segmentation(\n", |
230 | | - " modality_file=f\"{BASE_PATH}/data/t1c.nii.gz\",\n", |
231 | | - " segmentation_file=f\"{BASE_PATH}/output/t1c_segmentation.nii.gz\",\n", |
| 203 | + " modality_file=subject_path / f\"{subject}-t1c.nii.gz\",\n", |
| 204 | + " segmentation_file=\"segmentation.nii.gz\"\n", |
232 | 205 | ")" |
233 | 206 | ] |
234 | 207 | }, |
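| | + {
| | + "cell_type": "markdown",
| | + "metadata": {},
| | + "source": [
| | + "To double-check which label values the algorithm actually wrote, you can load the segmentation and print its unique values. This is a minimal sketch, assuming `nibabel` and `numpy` are available in the environment:"
| | + ]
| | + },
| | + {
| | + "cell_type": "code",
| | + "execution_count": null,
| | + "metadata": {},
| | + "outputs": [],
| | + "source": [
| | + "import nibabel as nib\n",
| | + "import numpy as np\n",
| | + "\n",
| | + "# Load the generated segmentation and list the label values it contains\n",
| | + "seg = nib.load(\"segmentation.nii.gz\").get_fdata()\n",
| | + "print(np.unique(seg))"
| | + ]
| | + },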
235 | 208 | { |
236 | 209 | "cell_type": "markdown", |
237 | 210 | "metadata": {}, |
238 | 211 | "source": [ |
239 | | - "### Multiple input modalities and other available outputs" |
| 212 | + "## Batch processing\n", |
| 213 | + "\n", |
| 214 | + "BraTS allows to run an algorithm for a single set of input images (t1n, t1c, t2f, t2w of the same patient) or for multiple subjects.\n", |
| 215 | + "Each of the available classes provides methods for both: \n", |
| 216 | + "- `.infer_single(...)` that takes in the paths to the required input modalities and a path to store the result\n", |
| 217 | + "- `.infer_batch(...)` that takes in a path to a data folder containing multiple sets of subjects and a path to an output folder to store the results" |
240 | 218 | ] |
241 | 219 | }, |
242 | 220 | { |
243 | 221 | "cell_type": "markdown", |
244 | 222 | "metadata": {}, |
245 | 223 | "source": [ |
246 | | - "AURORA also supports different combinations of multi-modal MRI files [(see manuscript)](https://www.sciencedirect.com/science/article/pii/S016781402389795X). It will automatically select a suitable model depending on the inputs supplied.\n", |
247 | | - "\n", |
248 | | - "- Any of the following combination of sequences can be supplied: \n", |
249 | | - " - T1-CE only\n", |
250 | | - " - T1 only\n", |
251 | | - " - T2-FLAIR only\n", |
252 | | - " - T1-CE + T2-FLAIR\n", |
253 | | - " - T1-CE + T1\n", |
254 | | - " - T1-CE + T1 + T2-FLAIR\n", |
255 | | - " - T1-CE + T1 + T2 + T2-FLAIR \n", |
256 | | - " \n", |
257 | | - "- For the last combination (with all 4 sequences), the [(vanilla model)](https://www.sciencedirect.com/science/article/pii/S0167814022045625) can also be used.\n", |
| 224 | + "#### The example below shows how to perform a batch inference\n", |
| 225 | + "The sets of subject inputs need to be stored in a specific structure to be recognized by the package:\n", |
| 226 | + "```\n", |
| 227 | + "data_folder\n", |
| 228 | + "┣ A\n", |
| 229 | + "┃ ┣ A-t1c.nii.gz\n", |
| 230 | + "┃ ┣ A-t1n.nii.gz\n", |
| 231 | + "┃ ┣ A-t2f.nii.gz\n", |
| 232 | + "┃ ┗ A-t2w.nii.gz\n", |
| 233 | + "┣ B\n", |
| 234 | + "┃ ┣ B-t1c.nii.gz\n", |
| 235 | + "┃ ┣ ...\n", |
| 236 | + "```" |
| 237 | + ] |
| 238 | + }, |
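| | + {
| | + "cell_type": "markdown",
| | + "metadata": {},
| | + "source": [
| | + "Before running the batch inference below, you can quickly verify that your data folder matches this layout. A minimal sketch using only `pathlib` (already imported above):"
| | + ]
| | + },
| | + {
| | + "cell_type": "code",
| | + "execution_count": null,
| | + "metadata": {},
| | + "outputs": [],
| | + "source": [
| | + "# Print each subject folder and the modality files it contains\n",
| | + "for subject_folder in sorted(data_path.iterdir()):\n",
| | + "    if subject_folder.is_dir():\n",
| | + "        print(subject_folder.name, [f.name for f in sorted(subject_folder.iterdir())])"
| | + ]
| | + },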
| 239 | + { |
| 240 | + "cell_type": "code", |
| 241 | + "execution_count": null, |
| 242 | + "metadata": { |
| 243 | + "scrolled": true |
| 244 | + }, |
| 245 | + "outputs": [], |
| 246 | + "source": [ |
| 247 | + "output_path = Path(\"outputs\")\n", |
258 | 248 | "\n", |
259 | | - "- Instead of only saving the final output consisting of one file with 2 labels, additional files with labels for the whole lesion (metastasis + edema) or the metastasis only can also be saved.\n", |
| 249 | + "segmenter = AdultGliomaSegmenter(cuda_devices=\"4\")\n", |
| 250 | + "segmenter.infer_batch(\n", |
| 251 | + " data_folder=data_path,\n", |
| 252 | + " output_folder=output_path,\n", |
| 253 | + ")\n", |
260 | 254 | "\n", |
261 | | - "- Test-time augmentation can be enabled (tta parameter in config, default = True). Segmentation with TTA will take around 10 times longer than without TTA.\n", |
262 | | - "\n" |
| 255 | + "print([path.name for path in output_path.iterdir()])" |
| 256 | + ] |
| 257 | + }, |
| 258 | + { |
| 259 | + "cell_type": "markdown", |
| 260 | + "metadata": {}, |
| 261 | + "source": [ |
| 262 | + "## Advanced Usage" |
263 | 263 | ] |
264 | 264 | }, |
265 | 265 | { |
266 | 266 | "cell_type": "markdown", |
267 | 267 | "metadata": {}, |
268 | 268 | "source": [ |
269 | | - "#### The example below shows how to perform the inference using multi-modal inputs.\n", |
270 | | - "*(This may take a while)*" |
| 269 | + "By default the algorithm that won the most recent challenge will be run on the first available GPU. This behavior and other options can be adapted, e.g.:\n", |
| 270 | + "- Select a different algorithm from the available constants (Enum classes for each challenge) with the `algorithm` parameter\n", |
| 271 | + "- Select a specific GPU if multiple are available with the `cuda_decives` parameter\n", |
| 272 | + "- Force CPU execution with the `force_cpu`flag (will cause an exception for many algorithms since many do not support CPU execution)\n", |
| 273 | + "- Save the generated logs in a log file with the `log_file` parameter" |
271 | 274 | ] |
272 | 275 | }, |
273 | 276 | { |
274 | 277 | "cell_type": "code", |
275 | 278 | "execution_count": null, |
276 | | - "metadata": { |
277 | | - "scrolled": true |
278 | | - }, |
| 279 | + "metadata": {}, |
279 | 280 | "outputs": [], |
280 | 281 | "source": [ |
281 | | - "# Instantiate the AuroraInferer\n", |
282 | | - "inferer = AuroraInferer()\n", |
| 282 | + "segmenter = AdultGliomaSegmenter(\n", |
| 283 | + " algorithm=AdultGliomaAlgorithms.BraTS23_3, # Use the 3rd placed algorithm of the Adult Glioma BraTS 2023 challenge\n", |
| 284 | + " cuda_devices=\"4\", # Select GPU device with ID 4\n", |
| 285 | + " force_cpu=False, # default, could be set to True to force CPU\n", |
| 286 | + ")\n", |
283 | 287 | "\n", |
284 | | - "inferer = AuroraInferer(config=config)\n", |
| 288 | + "segmenter.infer_single(\n", |
| 289 | + " t1c=subject_path / f\"{subject}-t1c.nii.gz\",\n", |
| 290 | + " t1n=subject_path / f\"{subject}-t1n.nii.gz\",\n", |
| 291 | + " t2f=subject_path / f\"{subject}-t2f.nii.gz\",\n", |
| 292 | + " t2w=subject_path / f\"{subject}-t2w.nii.gz\",\n", |
| 293 | + " output_file=\"segmentation.nii.gz\",\n", |
| 294 | + " log_file=\"segmentation.log\", # Save the logs in a new filed called `segmentation.log`\n", |
| 295 | + ")" |
| 296 | + ] |
| 297 | + }, |
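| | + {
| | + "cell_type": "markdown",
| | + "metadata": {},
| | + "source": [
| | + "To see which algorithm constants you can choose from, you can iterate over the constants class. This sketch assumes the algorithm constants are standard Python Enum classes, as the `brats.constants` import suggests:"
| | + ]
| | + },
| | + {
| | + "cell_type": "code",
| | + "execution_count": null,
| | + "metadata": {},
| | + "outputs": [],
| | + "source": [
| | + "# List all available algorithm choices for the Adult Glioma challenge\n",
| | + "for algorithm in AdultGliomaAlgorithms:\n",
| | + "    print(algorithm)"
| | + ]
| | + },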
| 298 | + { |
| 299 | + "cell_type": "markdown", |
| 300 | + "metadata": {}, |
| 301 | + "source": [ |
| 302 | + "## Algorithms from other Challenges\n", |
| 303 | + "\n", |
| 304 | + "BraTS provides the algorithms from all available recent BraTS Challenges, i.e.:\n", |
| 305 | + "- Adult Glioma Segmentation\n", |
| 306 | + "- BraTS-Africa Segmentation\n", |
| 307 | + "- Meningioma Segmentation\n", |
| 308 | + "- Brain Metastases Segmentation\n", |
| 309 | + "- Pediatric Tumors Segmentation\n", |
| 310 | + "- Inpainting\n", |
| 311 | + "\n", |
| 312 | + "The package provides a separate class and algorithm constants for each of the challenges.<br>\n", |
| 313 | + "The examples above were demonstrated using the class and constants of the Adult Glioma Segmentation challenge.\n", |
| 314 | + "\n", |
| 315 | + "In an identical way you can use:\n", |
| 316 | + "- `MeningiomaSegmenter` class with `MeningiomaAlgorithms`\n", |
| 317 | + "- `PediatricSegmenter` class with `PediatricAlgorithms`\n", |
| 318 | + "- etc." |
| 319 | + ] |
| 320 | + }, |
| 321 | + { |
| 322 | + "cell_type": "code", |
| 323 | + "execution_count": null, |
| 324 | + "metadata": {}, |
| 325 | + "outputs": [], |
| 326 | + "source": [ |
| 327 | + "# e.g. for the Meningioma Algorithms\n", |
| 328 | + "from brats import MeningiomaSegmenter\n", |
| 329 | + "from brats.constants import MeningiomaAlgorithms\n", |
285 | 330 | "\n", |
286 | | - "# Use all four input modalities,we also create other outputs and a custom log file\n", |
287 | | - "_ = inferer.infer(\n", |
288 | | - " t1=f\"{BASE_PATH}/data/t1.nii.gz\",\n", |
289 | | - " t1c=f\"{BASE_PATH}/data/t1c.nii.gz\",\n", |
290 | | - " t2=f\"{BASE_PATH}/data/t2.nii.gz\",\n", |
291 | | - " fla=f\"{BASE_PATH}/data/flair.nii.gz\",\n", |
292 | | - " segmentation_file=f\"{BASE_PATH}/output/multi-modal_segmentation.nii.gz\",\n", |
293 | | - " # The unbinarized network outputs for the whole tumor channel (edema + enhancing tumor core + necrosis) channel\n", |
294 | | - " whole_tumor_unbinarized_floats_file=f\"{BASE_PATH}/output/whole_tumor_unbinarized_floats.nii.gz\",\n", |
295 | | - " # The unbinarized network outputs for the metastasis (tumor core) channel\n", |
296 | | - " metastasis_unbinarized_floats_file=f\"{BASE_PATH}/output/metastasis_unbinarized_floats.nii.gz\",\n", |
297 | | - " log_file=f\"{BASE_PATH}/output/custom_logfile.log\",\n", |
| 331 | + "segmenter = MeningiomaSegmenter(\n", |
| 332 | + " algorithm=MeningiomaAlgorithms.BraTS23_2, cuda_devices=\"4\"\n", |
| 333 | + ")\n", |
| 334 | + "segmenter.infer_batch(\n", |
| 335 | + " data_folder=data_path, output_folder=output_path, log_file=\"test.log\"\n", |
298 | 336 | ")" |
299 | 337 | ] |
300 | 338 | } |
|
315 | 353 | "name": "python", |
316 | 354 | "nbconvert_exporter": "python", |
317 | 355 | "pygments_lexer": "ipython3", |
318 | | - "version": "3.10.13" |
| 356 | + "version": "3.8.10" |
319 | 357 | } |
320 | 358 | }, |
321 | 359 | "nbformat": 4, |
|