We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 934c447 commit 89574a6
3 files changed
monailabel/monaivista/lib/model/vista_point_2pt5/trainer_2pt5d.py
@@ -768,7 +768,7 @@ def val_epoch(model, loader, epoch, acc_func, args, iterative=False, post_label=
768
not_nans_total += not_nans
769
770
acc, not_nans = acc_sum_total / not_nans_total, not_nans_total
771
- f_name = batch_data["image_meta_dict"]["filename_or_obj"]
+ f_name = batch_data["image"].meta["filename_or_obj"]
772
print(f"Rank: {args.rank}, Case: {f_name}, Acc: {acc:.4f}, N_prompts: {int(not_nans)} ")
773
774
acc = torch.tensor(acc).cuda(args.rank)
training/trainer_2pt5d.py
@@ -466,7 +466,7 @@ def val_epoch(model, loader, epoch, acc_func, args, iterative=False, post_label=
466
467
468
469
470
471
472
training/utils/data_utils.py
@@ -101,7 +101,7 @@ def get_loader(args):
101
102
train_transform = transforms.Compose(
103
[
104
- LoadImaged(keys=["image", "label"]),
+ LoadImaged(keys=["image", "label"], image_only=True),
105
EnsureChannelFirstd(keys=["image", "label"]),
106
Orientationd(keys=["image", "label"], axcodes="RAS"),
107
Spacingd(keys=["image", "label"], pixdim=(1.5, 1.5, 1.5), mode=("bilinear", "nearest")),
@@ -114,7 +114,7 @@ def get_loader(args):
114
115
val_transform = transforms.Compose(
116
117
118
119
120
0 commit comments