import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import PIL
import pathlib
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# Download the TF flowers dataset (~218 MB) and cache it under ./datasets.
# `untar=True` extracts the archive; get_file returns the extracted path.
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, cache_dir='.', untar=True)
# NOTE: the bare expressions below are notebook display cells; in a plain
# script they are no-ops.
data_dir
os.listdir(data_dir)
# Re-bind as a pathlib.Path so we can use glob() below.
data_dir = pathlib.Path(data_dir)
data_dir
# Total number of jpg images across all five class subdirectories.
len(list(data_dir.glob('*/*.jpg')))
roses = list(data_dir.glob('roses/*.jpg'))
roses[:5]
# Sanity-check: open one image (displays inline in a notebook).
PIL.Image.open(roses[0])
# Canonical class order — the position of a name in this list is its
# integer label, so both lookup tables are derived from it.
flower_classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips']
# class name -> list of Paths to that class's jpg files
flower_images_dic = {
    name: list(data_dir.glob(f'{name}/*.jpg')) for name in flower_classes
}
# class name -> integer label in [0, 4]
flower_labels_dic = {name: label for label, name in enumerate(flower_classes)}
# Load every image, resize to a uniform 180x180, and build the parallel
# feature/label lists X and y.
# (The original had a dead `img = cv2.imread(...)` probe line here — its
# result was immediately overwritten inside the loop — and no guard for
# unreadable files, which made cv2.resize raise on a None image.)
X, y = [], []
for flower_name, images in flower_images_dic.items():
    label = flower_labels_dic[flower_name]  # loop-invariant: hoisted out
    for image_path in images:
        img = cv2.imread(str(image_path))  # BGR uint8, or None on failure
        if img is None:
            # Corrupt/unreadable file — skip it rather than crash in resize.
            continue
        X.append(cv2.resize(img, (180, 180)))
        y.append(label)
# Show the first 16 images in a 4x4 grid, each titled with its integer label.
plt.figure(figsize=(15, 20))
for position in range(1, 17):  # subplot positions are 1-based
    plt.subplot(4, 4, position)
    plt.axis('off')
    plt.imshow(X[position - 1])
    plt.title(y[position - 1])
plt.show()
# Stack the Python lists into ndarrays (X: one row per 180x180x3 image).
X = np.array(X)
y = np.array(y)
# Default split is 75% train / 25% test; the fixed seed makes it reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print("X_train Length: ", len(X_train))
print("X_test Length: ", len(X_test))
# True division maps uint8 pixel values into float [0, 1] for training.
X_train_scaled, X_test_scaled = X_train / 255, X_test / 255
# Baseline CNN: three Conv/Pool stages followed by a dense classifier head.
# Declaring input_shape on the first layer builds the model immediately,
# so model.summary() works before fit and shape mismatches fail early
# (the original left the shape undeclared until the first fit call).
model = Sequential([
    layers.Conv2D(16, 3, padding='same', activation='relu',
                  input_shape=(180, 180, 3)),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    # Softmax output emits probabilities, hence from_logits=False below.
    layers.Dense(5, activation='softmax')
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train, epochs=5, batch_size=64, verbose=1)
# Test-set loss/accuracy for the baseline model.
model.evaluate(X_test_scaled, y_test)
# Per-class probability rows, one per test sample.
prediction = model.predict(X_test_scaled)
# Predicted class index for test sample #1; the bare expressions below are
# notebook display cells comparing it against the true label.
pred = np.argmax(prediction[1])
pred
y_test[1]
# Light train-time augmentation: random horizontal flips plus small random
# rotations and zooms (these layers are inactive at inference time).
# NOTE(review): the `layers.experimental.preprocessing` path is deprecated
# and removed in Keras 3; these layers live directly under keras.layers
# since TF 2.6 — confirm against the project's pinned TF version.
data_augmentation = keras.Sequential([
    layers.RandomFlip('horizontal'),
    layers.RandomRotation(0.1),
    layers.RandomZoom(0.1)
])
# Same CNN as the baseline, but with augmentation up front and dropout
# before the classifier head to reduce overfitting on this small dataset.
augmented_layers = [data_augmentation]
for filters in (16, 32, 64):
    augmented_layers.append(layers.Conv2D(filters, 3, padding='same', activation='relu'))
    augmented_layers.append(layers.MaxPooling2D())
augmented_layers += [
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(5, activation='softmax'),
]
model = Sequential(augmented_layers)
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy'],
)
# Longer schedule than the baseline: augmentation slows convergence.
history = model.fit(X_train_scaled, y_train, epochs=20, batch_size=64, verbose=1)
model.evaluate(X_test_scaled, y_test)
# Per-class probability rows for the whole test set.
prediction = model.predict(X_test_scaled)
# Predicted label per sample: vectorized argmax over the class axis
# replaces the original per-row Python loop + append + np.array round trip.
pred = np.argmax(prediction, axis=1)
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns

# Per-class precision/recall/F1 plus overall accuracy for the final model.
print("Classification Report: \n", classification_report(y_test, pred))
print("-" * 50)
print("Accuracy Score: ", accuracy_score(y_test, pred))
print("-" * 50)
# Confusion matrix as an annotated heatmap; binding the Axes to `_`
# suppresses the notebook repr (same effect as the original trailing ';').
plt.figure(figsize=(10, 10))
plt.axis('off')
_ = sns.heatmap(confusion_matrix(y_test, pred), annot=True, fmt='g')