-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmobilenetv2.py
More file actions
209 lines (164 loc) · 6.58 KB
/
mobilenetv2.py
File metadata and controls
209 lines (164 loc) · 6.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
# -*- coding: utf-8 -*-
"""Copy of MobileNetV2_MinorProject.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1kpfk9TI-6-86Z7gQWLfpA1kRDOI9XWLx
"""
# Mount Google Drive so the dataset archive is reachable from this runtime.
# NOTE(review): google.colab only exists inside Colab — this script is a
# notebook export and assumes that environment for the mount step.
from google.colab import drive
drive.mount('/content/drive')

# Path of the dataset archive on Google Drive (exact uploaded file name).
zip_file_path = "/content/drive/My Drive/archive (1).zip"
# Destination directory the archive is extracted into.
destination_path = "/content/datasets_unzipped/"

# FIX: the notebook used the IPython-only `!unzip` shell magic, which is a
# SyntaxError in a plain .py file. Use the stdlib zipfile module instead so
# the exported script is valid Python and runnable outside a notebook.
import zipfile

with zipfile.ZipFile(zip_file_path) as archive:
    archive.extractall(destination_path)
# Import the necessary libraries
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Dataset layout after extraction: <base>/train and <base>/test.
base_dir = "/content/datasets_unzipped/dataset/dataset/"
train_dir = base_dir + "train"
test_dir = base_dir + "test"

# Every image is resized to this resolution and served in batches.
IMAGE_SIZE = (150, 150)
BATCH_SIZE = 32

# Augmentation is applied to the training split only; the test split is
# merely rescaled so evaluation sees unmodified images.
_augmentation = dict(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
)
train_datagen = ImageDataGenerator(rescale=1./255, **_augmentation)
test_datagen = ImageDataGenerator(rescale=1./255)

# Stream labelled batches straight from the class-subfolder directories;
# 'categorical' yields one-hot label vectors.
_flow_opts = dict(
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical',
)
train_generator = train_datagen.flow_from_directory(train_dir, **_flow_opts)
test_generator = test_datagen.flow_from_directory(test_dir, **_flow_opts)
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.optimizers import Adam

# Load the pre-trained MobileNetV2 backbone without its ImageNet classifier
# head; input shape matches IMAGE_SIZE plus 3 colour channels.
base_model_mobilenet = MobileNetV2(weights='imagenet', include_top=False, input_shape=(150, 150, 3))

# Freeze the backbone so only the new head is trained (transfer learning).
base_model_mobilenet.trainable = False

# IMPROVEMENT: derive the class count from the data instead of hard-coding 6,
# so the script keeps working if the dataset's class folders change.
num_classes = train_generator.num_classes  # 6 for the current dataset

# New classification head: global pooling -> 256-unit ReLU -> softmax.
x = base_model_mobilenet.output
x = GlobalAveragePooling2D()(x)
x = Dense(256, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)

# Assemble the final model: frozen backbone + trainable head.
model_mobilenet = Model(inputs=base_model_mobilenet.input, outputs=predictions)

# Low learning rate is the usual choice when training a head on top of
# frozen pre-trained weights.
model_mobilenet.compile(optimizer=Adam(learning_rate=0.0001),
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])
model_mobilenet.summary()
# Train the head for a fixed number of epochs, validating on the test split
# after each epoch. Steps are whole batches per pass; the floor division
# skips any trailing partial batch, matching the original behaviour.
epochs = 10
train_steps = train_generator.samples // train_generator.batch_size
val_steps = test_generator.samples // test_generator.batch_size

history_mobilenet = model_mobilenet.fit(
    train_generator,
    steps_per_epoch=train_steps,
    epochs=epochs,
    validation_data=test_generator,
    validation_steps=val_steps,
)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report, confusion_matrix
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# 1. Recreate the test generator with shuffle=False so predictions line up
#    with the directory-order labels.
eval_datagen = ImageDataGenerator(rescale=1./255)
eval_generator = eval_datagen.flow_from_directory(
    test_dir,
    target_size=(150, 150),
    batch_size=32,  # Use a standard batch size
    class_mode='categorical',
    shuffle=False)

# 2. Collect true labels and predicted probabilities batch by batch.
#    FIX: cast steps to int — np.ceil returns a float, which printed as
#    e.g. "7.0 batches" and made the loop bound an int-vs-float comparison.
y_true_list = []
y_pred_probs_list = []
steps = int(np.ceil(eval_generator.samples / eval_generator.batch_size))
print(f"Making predictions in {steps} batches...")
for i, (images, labels) in enumerate(eval_generator):
    # Renamed from `predictions` to avoid shadowing the model-head tensor
    # defined when the model was built.
    batch_probs = model_mobilenet.predict(images, verbose=0)
    y_pred_probs_list.append(batch_probs)
    y_true_list.append(labels)
    if i + 1 == steps:
        # Keras directory iterators loop forever; stop after one full pass.
        break
y_pred_probs = np.concatenate(y_pred_probs_list)
y_true_encoded = np.concatenate(y_true_list)
y_true = np.argmax(y_true_encoded, axis=1)  # one-hot -> class indices
y_pred = np.argmax(y_pred_probs, axis=1)

# 3. Class names in index order, taken from the generator's mapping.
class_labels = list(eval_generator.class_indices.keys())

# 4. Per-class precision/recall/F1 report.
print("Classification Report:")
print(classification_report(y_true, y_pred, target_names=class_labels))

# 5. Confusion matrix as text.
print("\nConfusion Matrix:")
cm = confusion_matrix(y_true, y_pred)
print(cm)

# 6. Confusion matrix as a heatmap.
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues",
            xticklabels=class_labels,
            yticklabels=class_labels)
plt.title("Confusion Matrix for MobileNetV2 Model")
plt.ylabel("True Label")
plt.xlabel("Predicted Label")
plt.show()

# 7. Overall loss/accuracy; evaluate() restarts the (reset) generator itself.
test_loss, test_accuracy = model_mobilenet.evaluate(eval_generator, verbose=0)
print(f"\nOverall Test Accuracy: {test_accuracy * 100:.2f}%")
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model  # kept from notebook export; unused in this cell

# Requires 'test_dir' and the trained 'model_mobilenet' from earlier cells.
# Recreate the test generator with shuffle=False so labels and predictions
# stay in the same (directory) order.
eval_datagen = ImageDataGenerator(rescale=1./255)
eval_generator_for_roc = eval_datagen.flow_from_directory(
    test_dir,
    target_size=(150, 150),
    batch_size=32,  # Use a standard batch size
    class_mode='categorical',
    shuffle=False)

# Collect true labels and predicted probabilities batch-wise (memory-friendly).
# FIX: cast steps to int — np.ceil yields a float, which printed as
# e.g. "7.0 batches" and made the loop bound an int-vs-float comparison.
y_true_list = []
y_pred_probs_list = []
steps = int(np.ceil(eval_generator_for_roc.samples / eval_generator_for_roc.batch_size))
print(f"Making predictions in {steps} batches...")
for i, (images, labels) in enumerate(eval_generator_for_roc):
    y_pred_probs_list.append(model_mobilenet.predict(images, verbose=0))
    y_true_list.append(labels)
    if i + 1 == steps:
        # The iterator repeats indefinitely; stop after one full pass.
        break

# Concatenate per-batch pieces into final arrays.
y_true_encoded = np.concatenate(y_true_list)
y_pred_probs_roc = np.concatenate(y_pred_probs_list)
y_true_roc = np.argmax(y_true_encoded, axis=1)  # one-hot -> class indices

# Persist for later ROC-curve plotting / cross-model comparison.
np.save('y_true_mobilenet.npy', y_true_roc)
np.save('y_pred_probs_mobilenet.npy', y_pred_probs_roc)
print("True labels and predicted probabilities for MobileNetV2 model have been saved.")
print(f"y_true_roc shape: {y_true_roc.shape}")
print(f"y_pred_probs_roc shape: {y_pred_probs_roc.shape}")