Skip to content

Commit 62ba8d7

Browse files
Ready for final training: 130 epoch, Adam, lr=1e-8
1 parent df73167 commit 62ba8d7

5 files changed

Lines changed: 25 additions & 27 deletions

File tree

analyze_model.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
from keras import backend as K
77
from keras.models import load_model
88
from tools.plotting_tools import plot_feature_label_prediction
9-
from tools.loss_metrics_tools import intersection_over_union, mean_iou, focal_loss
9+
from tools.loss_metrics_tools import intersection_over_union, mean_iou
1010
from tools.data_tools import DataSequence, get_data_generator, preprocess_feature, preprocess_label
1111

1212
def argument_parser():
@@ -79,8 +79,7 @@ def main():
7979

8080
# Get the model
8181
model_path = os.path.join("saved_models", "model_and_weights.hdf5")
82-
model = load_model(model_path, custom_objects={"loss": focal_loss(),
83-
"mean_iou": mean_iou})
82+
model = load_model(model_path, custom_objects={"mean_iou": mean_iou})
8483

8584
# Make comparison plots
8685
generator_testing = get_data_generator(FEATURE_FILE_TESTING, LABEL_FILE_TESTING)

batch_jobs/submit_batch_jobs_fnal_tev.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ echo "Running python analyze_model.py"
4747
echo "JOB $SLURM_JOB_ID is running on $SLURM_JOB_NODELIST "
4848
echo "*********************************************************"
4949
echo ""
50-
singularity exec --bind /data/arbint --nv /data/arbint/DeepLearningWithProtoDUNE.img python analyze_model.py -p 5 -s Development
50+
singularity exec --bind /data/arbint --nv /data/arbint/DeepLearningWithProtoDUNE.img python analyze_model.py -p 50 -s Development
5151

5252
echo "*********************************************************"
5353
echo "All done. Exiting"

configurations/fnal_tev_configuration.ini

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ BATCH_SIZE = 2
2121
NUM_TRAINING = 2637
2222
NUM_VALIDATION = 246
2323
NUM_TESTING = 62
24-
NUM_EPOCHS = 120
24+
NUM_EPOCHS = 130
2525

2626
[DEVELOPMENT]
2727
NUM_TRAINING = 40

tools/tiramisu_model.py

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,15 @@
1+
"""
2+
Implementation of the One Hundred Layers Tiramisu as described in
3+
The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic Segmentation.
4+
5+
Tiramisu is a Fully Convolutional Networks (FCN) network based on DenseNet architecture.
6+
7+
Papers:
8+
Tiramisu: https://arxiv.org/pdf/1611.09326.pdf
9+
FCN: https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcn.pdf
10+
DenseNet: https://arxiv.org/abs/1608.06993
11+
"""
12+
113
from keras.models import Model
214
from keras.layers import Reshape
315
from keras.regularizers import l2
@@ -36,6 +48,7 @@ def transition_dn(x, dr, wd):
3648
#x = conv_relu_bn(x, x.get_shape().as_list()[-1], size=1, dr=dr, wd=wd)
3749
#return MaxPooling2D(strides=(2, 2))(x)
3850

51+
# Try stride 2 1x1 convolution instead
3952
return conv_relu_bn(x, x.get_shape().as_list()[-1], size=1, dr=dr, wd=wd, stride=2)
4053

4154
def down_path(x, num_layers, growth_rate, dr, wd):
@@ -58,7 +71,7 @@ def up_path(added, skips, num_layers, growth_rate, dr, wd):
5871
x, added = dense_block(n, x, growth_rate, dr, wd)
5972
return x
6073

61-
""""
74+
"""
6275
init_num_filter: initial number of filters
6376
num_layers_per_block: list of number of layers in each dense block
6477
growth_rate: number of filters to add per dense block

train_model.py

Lines changed: 7 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,12 @@
66
import configparser
77
from keras.layers import Input
88
from keras import backend as K
9+
from keras.optimizers import Adam
910
from keras.utils import plot_model
1011
from tools.data_tools import DataSequence
12+
from tools.loss_metrics_tools import mean_iou
1113
from tools.callbacks import PredictionsCallback
12-
from keras.optimizers import SGD, RMSprop, Adam
1314
from tools.tiramisu_model import get_tiramisu_model
14-
from tools.loss_metrics_tools import mean_iou, focal_loss
1515
from tools.plotting_tools import plot_history, plot_feature_label_prediction
1616
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
1717

@@ -123,25 +123,11 @@ def main():
123123
except:
124124
print("Old weights couldn't be loaded successfully, will continue!")
125125

126-
learning_rate = 1.0e-6;
127-
decay_rate = learning_rate/NUM_EPOCHS
128-
print("Decay rate is set to {}.".format(decay_rate))
129-
130-
test = 1
131-
if test == 1:
132-
model.compile(optimizer=SGD(lr=learning_rate), loss='categorical_crossentropy', metrics=['accuracy', mean_iou])
133-
elif test == 2:
134-
model.compile(optimizer=SGD(lr=learning_rate, decay=decay_rate), loss='categorical_crossentropy', metrics=['accuracy', mean_iou])
135-
elif test == 3:
136-
model.compile(optimizer=RMSprop(lr=learning_rate), loss='categorical_crossentropy', metrics=['accuracy', mean_iou])
137-
elif test == 4:
138-
model.compile(optimizer=RMSprop(lr=learning_rate), loss=focal_loss(), metrics=['accuracy', mean_iou])
139-
elif test == 5:
140-
model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy', metrics=['accuracy', mean_iou])
141-
else:
142-
print("\nError: Test is not in the range.")
143-
print("Exiting!\n")
144-
sys.exit(1)
126+
learning_rate = 1.0e-8;
127+
#decay_rate = learning_rate/NUM_EPOCHS
128+
#print("Decay rate is set to {}.".format(decay_rate))
129+
130+
model.compile(optimizer=Adam(lr=learning_rate), loss='categorical_crossentropy', metrics=['accuracy', mean_iou])
145131

146132
# Print model summary
147133
#model.summary()

0 commit comments

Comments
 (0)