-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel.py
More file actions
153 lines (118 loc) · 4.72 KB
/
model.py
File metadata and controls
153 lines (118 loc) · 4.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
from keras.models import Sequential
from keras_preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras import regularizers, optimizers
import matplotlib.pyplot as plt
from keras.callbacks import ModelCheckpoint
from dataset_reader import class_dataset_reader
import pandas as pd
import os
# --- Dataset loading and training configuration ---

# Read the raw DeepScores images via the project-local dataset reader.
# Fix: the absolute path was machine-specific; it can now be overridden via
# the DEEPSCORES_DATA_PATH environment variable. The default is the original
# hard-coded location, so existing setups behave exactly as before.
data_path = os.environ.get(
    "DEEPSCORES_DATA_PATH",
    "/home/abi-osler/Documents/CV_final_project/DeepScoresClassification")
dataset = class_dataset_reader(data_path=data_path)
dataset.read_images()

# Per-split dataframes of (filename, annotation) pairs; dtype=str keeps the
# annotation column as string labels for the categorical generators below.
train_df = pd.read_csv("processed_data_files/train_image_annotation.csv", dtype=str)
valid_df = pd.read_csv("processed_data_files/val_image_annotation.csv", dtype=str)
test_df = pd.read_csv("processed_data_files/test_image_annotation.csv", dtype=str)

# Training parameters
batch_size = 20
num_classes = 118            # number of symbol classes in the dataset
epochs = 1000
input_shape = (220, 120, 3)  # (height, width, channels) fed to the CNN

# Directory for saving trained weights
save_dir = os.path.join(os.getcwd(), 'saved_models2')
model_name = 'keras_deep_scores_music_object_model.h5'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
# --- CNN architecture ---
# Adapted from the CNN example for the CIFAR-10 dataset in the official
# Keras documentation: two convolutional blocks (32 then 64 filters), each
# followed by max-pooling and dropout, then a dense softmax classifier.
model = Sequential()

# Block 1: two 3x3 convolutions with 32 filters.
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=input_shape))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Block 2: two 3x3 convolutions with 64 filters.
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Classifier head: flatten -> 512-unit dense -> softmax over the classes.
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Fix: use num_classes instead of the duplicated magic number 118 so the
# output layer stays in sync with the configured class count.
model.add(Dense(num_classes, activation='softmax'))
# --- Optimizer and compilation ---
# Fix: the lowercase `optimizers.rmsprop` was only a legacy serialization
# alias and does not exist in current Keras releases; the RMSprop class is
# the stable public API across versions.
# NOTE(review): `lr`/`decay` are the legacy argument names matching the Keras
# 2.x era this script targets (it imports keras_preprocessing standalone);
# newer Keras renames `lr` to `learning_rate` — confirm against the pinned
# Keras version before upgrading.
opt = optimizers.RMSprop(lr=0.0001, decay=1e-6)
# Categorical cross-entropy matches the one-hot labels produced by the
# class_mode='categorical' generators.
model.compile(optimizer=opt,
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# Image generators feed the network in batches: loading every image at once
# would exceed available memory.  Training images get light augmentation;
# validation and test images are only rescaled to [0, 1].
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)


def _make_flow(datagen, frame):
    """Build a categorical batch generator over a (filename, annotation) frame."""
    return datagen.flow_from_dataframe(
        dataframe=frame,
        x_col="filename",
        y_col="annotation",
        target_size=(220, 120),
        batch_size=batch_size,
        class_mode='categorical')


train_generator = _make_flow(train_datagen, train_df)
valid_generator = _make_flow(test_datagen, valid_df)
test_generator = _make_flow(test_datagen, test_df)
# How many batches to run per epoch.  The commented-out expressions are the
# full-dataset values; the small constants appear to be a deliberate
# shortcut for quicker runs — kept as-is.
STEP_SIZE_TRAIN = 10    # train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = 10    # valid_generator.n//valid_generator.batch_size
STEP_SIZE_TEST = 1000   # test_generator.n//test_generator.batch_size

# Checkpoint the best model so far (by training accuracy) after each epoch.
# Fix: build the path with os.path.join instead of string concatenation,
# consistent with how save_dir itself is constructed.
filepath = "saved-model-{epoch:02d}-{accuracy:.2f}.hdf5"
checkpoint = ModelCheckpoint(os.path.join(save_dir, filepath),
                             monitor='accuracy', verbose=1,
                             save_best_only=True, mode='max')
# Trains the model on the train generator, validating on the validation
# generator after every epoch; the checkpoint callback saves improving models.
# NOTE(review): fit_generator is deprecated in newer Keras (model.fit accepts
# generators directly) — kept as-is for the Keras version this script targets.
# NOTE(review): shuffle=True has no effect with generator input; any
# shuffling happens inside the generator itself — confirm if order matters.
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=epochs,
shuffle=True,
callbacks=[checkpoint])
# Plot training & validation accuracy curves, save them to disk, then show.
for series, line_fmt in (('accuracy', "g--"), ('val_accuracy', "b--")):
    plt.plot(history.history[series], line_fmt, label=series)
plt.title('Accuracy and Validation accuracy')
plt.ylabel('Data')
plt.xlabel('Epoch')
#plt.xticks(range(1, epochs))
plt.legend(loc='upper left')
plt.savefig("accuracy&validation1.png")
plt.show()
plt.close()
# Persist the fully trained model to the save directory.
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)

# Score the trained model on the held-out test generator and report
# the (loss, accuracy) pair it returns.
loss_and_acc = model.evaluate_generator(generator=test_generator,
                                        steps=STEP_SIZE_TEST)
print('Test loss:', loss_and_acc[0])
print('Test accuracy:', loss_and_acc[1])