import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import PIL
import pathlib
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# Download and unpack the TF flowers dataset (~228 MB) into ./datasets,
# then explore the extracted directory.
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, cache_dir='.', untar=True)
# (notebook output)
# Downloading data from https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz
# 228818944/228813984 [==============================] - 1s 0us/step

# Bare expressions only render in a notebook; print explicitly in a script.
print(data_dir)
print(os.listdir(data_dir))

# Switch to pathlib for convenient recursive globbing.
data_dir = pathlib.Path(data_dir)
print(data_dir)
print(len(list(data_dir.glob('*/*.jpg'))))  # total image count across classes

roses = list(data_dir.glob('roses/*.jpg'))
print(roses[:5])
PIL.Image.open(roses[0])  # NOTE: displays inline only in a notebook; no-op in a script
# Fixed class order; the index of each name doubles as its integer label.
class_names = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips']

# Map each flower class to the list of its image paths on disk.
flower_images_dic = {name: list(data_dir.glob(f'{name}/*.jpg')) for name in class_names}

# Integer label per class, in the same fixed order (roses=0 ... tulips=4).
flower_labels_dic = {idx_name[1]: idx_name[0] for idx_name in enumerate(class_names)}
# Load every image as a BGR array, resize to a uniform 180x180, and build
# parallel feature (X) and integer-label (y) lists.
# (The original code also did a throwaway cv2.imread of the first rose image
# here; its result was immediately overwritten inside the loop, so it is removed.)
X, y = [], []
for flower_name, images in flower_images_dic.items():
    label = flower_labels_dic[flower_name]  # hoisted: constant per class
    for image_path in images:
        img = cv2.imread(str(image_path))
        # NOTE(review): cv2.imread returns None for unreadable files, which would
        # make resize raise; assumes all dataset files are valid JPEGs — confirm.
        resized_img = cv2.resize(img, (180, 180))
        X.append(resized_img)
        y.append(label)
# Preview the first 16 loaded images with their numeric labels as titles.
plt.figure(figsize=(15, 20))
for idx, (image, label) in enumerate(zip(X[:16], y[:16]), start=1):
    plt.subplot(4, 4, idx)
    plt.axis('off')
    plt.imshow(image)
    plt.title(label)
plt.show()
# Stack the image/label lists into numpy arrays for model input.
X = np.array(X)
y = np.array(y)

# Hold out test data (default test_size=0.25) with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print("X_train Length: ", len(X_train))
print("X_test Length: ", len(X_test))
# (notebook output — was pasted as bare text, which is a syntax error in a script)
# X_train Length:  2752
# X_test Length:  918

# Normalize uint8 pixel values into [0, 1] for the network.
X_train_scaled = X_train / 255
X_test_scaled = X_test / 255
# Baseline CNN: three conv/pool stages followed by a dense classifier head.
model = Sequential([
    # Explicit input contract: 180x180 3-channel images (as produced by cv2.resize).
    layers.Input(shape=(180, 180, 3)),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    # 5 flower classes; softmax output pairs with from_logits=False below.
    layers.Dense(5, activation='softmax')
])

model.compile(optimizer='adam',
              # Sparse variant: labels are integer class ids, not one-hot vectors.
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

history = model.fit(X_train_scaled, y_train, epochs=5, batch_size=64, verbose=1)
# (notebook output: baseline training log)
# Epoch 1/5
# 43/43 [==============================] - 34s 38ms/step - loss: 1.6634 - accuracy: 0.3227
# Epoch 2/5
# 43/43 [==============================] - 2s 38ms/step - loss: 1.1379 - accuracy: 0.5465
# Epoch 3/5
# 43/43 [==============================] - 2s 37ms/step - loss: 0.9880 - accuracy: 0.6068
# Epoch 4/5
# 43/43 [==============================] - 2s 38ms/step - loss: 0.8927 - accuracy: 0.6606
# Epoch 5/5
# 43/43 [==============================] - 2s 37ms/step - loss: 0.7635 - accuracy: 0.7122
# Evaluate the baseline model on the held-out set, then inspect one prediction.
model.evaluate(X_test_scaled, y_test)
# (notebook output — was pasted as bare text, a syntax error in a script)
# 29/29 [==============================] - 1s 15ms/step - loss: 0.9877 - accuracy: 0.6220

prediction = model.predict(X_test_scaled)
pred = np.argmax(prediction[1])  # predicted class id for test sample 1
print(pred)
print(y_test[1])  # true label for comparison
# Train-time augmentation pipeline: random horizontal flip, small rotation, zoom.
# NOTE(review): layers.experimental.preprocessing is deprecated in newer TF;
# layers.RandomFlip / RandomRotation / RandomZoom are the stable equivalents.
augmentation_layers = [
    layers.experimental.preprocessing.RandomFlip('horizontal'),
    layers.experimental.preprocessing.RandomRotation(0.1),
    layers.experimental.preprocessing.RandomZoom(0.1),
]
data_augmentation = keras.Sequential(augmentation_layers)
# Augmented CNN: same backbone as the baseline, plus augmentation up front and
# dropout before the classifier head to curb overfitting.
model = Sequential([
    # Explicit input contract: 180x180 3-channel images.
    layers.Input(shape=(180, 180, 3)),
    data_augmentation,
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),  # regularization: drop 20% of activations at train time
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(5, activation='softmax')
])

model.compile(optimizer='adam',
              # Integer labels -> sparse categorical cross-entropy.
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

history = model.fit(X_train_scaled, y_train, epochs=20, batch_size=64, verbose=1)
# (notebook output: augmented-model training log)
# Epoch 1/20
# 43/43 [==============================] - 3s 42ms/step - loss: 1.3932 - accuracy: 0.3819
# Epoch 2/20
# 43/43 [==============================] - 2s 41ms/step - loss: 1.1097 - accuracy: 0.5360
# Epoch 3/20
# 43/43 [==============================] - 2s 40ms/step - loss: 1.0192 - accuracy: 0.5963
# Epoch 4/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.9279 - accuracy: 0.6461
# Epoch 5/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.8601 - accuracy: 0.6632
# Epoch 6/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.7953 - accuracy: 0.6955
# Epoch 7/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.7395 - accuracy: 0.7206
# Epoch 8/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.7282 - accuracy: 0.7224
# Epoch 9/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.6775 - accuracy: 0.7438
# Epoch 10/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.6772 - accuracy: 0.7387
# Epoch 11/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.6375 - accuracy: 0.7660
# Epoch 12/20
# 43/43 [==============================] - 2s 40ms/step - loss: 0.6438 - accuracy: 0.7602
# Epoch 13/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.5887 - accuracy: 0.7664
# Epoch 14/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.5546 - accuracy: 0.7922
# Epoch 15/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.5792 - accuracy: 0.7780
# Epoch 16/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.5340 - accuracy: 0.7885
# Epoch 17/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.5137 - accuracy: 0.7998
# Epoch 18/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.4847 - accuracy: 0.8150
# Epoch 19/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.5157 - accuracy: 0.8038
# Epoch 20/20
# 43/43 [==============================] - 2s 41ms/step - loss: 0.4735 - accuracy: 0.8150
# Final held-out evaluation of the augmented model.
model.evaluate(X_test_scaled, y_test)
# (notebook output — was pasted as bare text, a syntax error in a script)
# 29/29 [==============================] - 1s 12ms/step - loss: 0.8050 - accuracy: 0.7309

# Per-sample class predictions: vectorized argmax over the class axis replaces
# the original element-wise Python loop; produces the same int array.
prediction = model.predict(X_test_scaled)
pred = np.argmax(prediction, axis=1)
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns

# Summarize test-set performance: per-class precision/recall/F1, overall
# accuracy, and a confusion-matrix heatmap.
separator = "-" * 50
print("Classification Report: \n", classification_report(y_test, pred))
print(separator)
print("Accuracy Score: ", accuracy_score(y_test, pred))
print(separator)

plt.figure(figsize=(10, 10))
plt.axis('off')
sns.heatmap(confusion_matrix(y_test, pred), annot=True, fmt='g')
# (notebook output: classification report for the augmented model)
# Classification Report:
#               precision    recall  f1-score   support
#            0       0.65      0.69      0.67       176
#            1       0.68      0.81      0.74       154
#            2       0.74      0.81      0.78       226
#            3       0.74      0.85      0.79       150
#            4       0.88      0.54      0.67       212
#     accuracy                           0.73       918
#    macro avg       0.74      0.74      0.73       918
# weighted avg       0.75      0.73      0.73       918
# --------------------------------------------------
# Accuracy Score:  0.7309368191721133
# --------------------------------------------------