Skip to content

Commit 0263b06

Browse files
committed
update: migrate Keras 1 API to Keras 2 (Convolution2D/subsample/bias → Conv2D/strides/use_bias) and move pretrained weights from imagenet_models/ to models/
1 parent ff1c3e0 commit 0263b06

1 file changed

Lines changed: 7 additions & 8 deletions

File tree

densenet121.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,8 @@
11
# -*- coding: utf-8 -*-
22

33
from keras.optimizers import SGD
4-
from keras.layers import Input, merge, ZeroPadding2D
4+
from keras.layers import Input, merge, ZeroPadding2D, Conv2D
55
from keras.layers.core import Dense, Dropout, Activation
6-
from keras.layers.convolutional import Convolution2D
76
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
87
from keras.layers.normalization import BatchNormalization
98
from keras.models import Model
@@ -57,7 +56,7 @@ def densenet121_model(img_rows, img_cols, color_type=1, nb_dense_block=4, growth
5756

5857
# Initial convolution
5958
x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
60-
x = Convolution2D(nb_filter, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
59+
x = Conv2D(nb_filter, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)
6160
x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
6261
x = Scale(axis=concat_axis, name='conv1_scale')(x)
6362
x = Activation('relu', name='relu1')(x)
@@ -88,10 +87,10 @@ def densenet121_model(img_rows, img_cols, color_type=1, nb_dense_block=4, growth
8887

8988
if K.image_dim_ordering() == 'th':
9089
# Use pre-trained weights for Theano backend
91-
weights_path = 'imagenet_models/densenet121_weights_th.h5'
90+
weights_path = 'models/densenet121_weights_th.h5'
9291
else:
9392
# Use pre-trained weights for Tensorflow backend
94-
weights_path = 'imagenet_models/densenet121_weights_tf.h5'
93+
weights_path = 'models/densenet121_weights_tf.h5'
9594

9695
model.load_weights(weights_path, by_name=True)
9796

@@ -130,7 +129,7 @@ def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4
130129
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
131130
x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
132131
x = Activation('relu', name=relu_name_base+'_x1')(x)
133-
x = Convolution2D(inter_channel, 1, 1, name=conv_name_base+'_x1', bias=False)(x)
132+
x = Conv2D(inter_channel, (1, 1), name=conv_name_base+'_x1', use_bias=False)(x)
134133

135134
if dropout_rate:
136135
x = Dropout(dropout_rate)(x)
@@ -140,7 +139,7 @@ def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4
140139
x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
141140
x = Activation('relu', name=relu_name_base+'_x2')(x)
142141
x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
143-
x = Convolution2D(nb_filter, 3, 3, name=conv_name_base+'_x2', bias=False)(x)
142+
x = Conv2D(nb_filter, (3, 3), name=conv_name_base+'_x2', use_bias=False)(x)
144143

145144
if dropout_rate:
146145
x = Dropout(dropout_rate)(x)
@@ -167,7 +166,7 @@ def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, we
167166
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
168167
x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
169168
x = Activation('relu', name=relu_name_base)(x)
170-
x = Convolution2D(int(nb_filter * compression), 1, 1, name=conv_name_base, bias=False)(x)
169+
x = Conv2D(int(nb_filter * compression), (1, 1), name=conv_name_base, use_bias=False)(x)
171170

172171
if dropout_rate:
173172
x = Dropout(dropout_rate)(x)

0 commit comments

Comments
 (0)